language | repo | path | class_span | source | target
|---|---|---|---|---|---|
python | jpadilla__pyjwt | jwt/exceptions.py | {
"start": 0,
"end": 91
} | class ____(Exception):
"""
Base class for all exceptions
"""
pass
| PyJWTError |
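Each row pairs a `source` snippet whose class name has been masked as `____` with the `target` name that fills the blank; `class_span` appears to give the start and end offsets of the class within the file at `path`. A minimal sketch of the fill-in relationship, using the first record above (the `fill_mask` helper is illustrative, not part of the dataset):

```python
# Illustrative helper (not part of the dataset): reconstruct the original
# class definition by substituting a record's target name into its source.
def fill_mask(source: str, target: str) -> str:
    # Each source masks exactly one class name with "____".
    return source.replace("____", target, 1)

record = {
    "source": 'class ____(Exception):\n    """Base class for all exceptions"""\n    pass',
    "target": "PyJWTError",
}
print(fill_mask(record["source"], record["target"]))
# -> class PyJWTError(Exception): ...
```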
python | huggingface__transformers | tests/models/speech_to_text/test_processing_speech_to_text.py | {
"start": 1209,
"end": 6221
} | class ____(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.tmpdirname = tempfile.mkdtemp()
vocab = ["<s>", "<pad>", "</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est"]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
save_dir = Path(cls.tmpdirname)
save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["spm_file"])
tokenizer = Speech2TextTokenizer.from_pretrained(cls.tmpdirname)
tokenizer.save_pretrained(cls.tmpdirname)
feature_extractor_map = {
"feature_size": 24,
"num_mel_bins": 24,
"padding_value": 0.0,
"sampling_rate": 16000,
"return_attention_mask": False,
"do_normalize": True,
}
feature_extractor = Speech2TextFeatureExtractor(**feature_extractor_map)
tokenizer = Speech2TextTokenizer.from_pretrained(cls.tmpdirname)
processor = Speech2TextProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
processor.save_pretrained(cls.tmpdirname)
def get_tokenizer(self, **kwargs):
return Speech2TextTokenizer.from_pretrained(self.tmpdirname, **kwargs)
def get_feature_extractor(self, **kwargs):
return Speech2TextFeatureExtractor.from_pretrained(self.tmpdirname, **kwargs)
@classmethod
def tearDownClass(cls):
shutil.rmtree(cls.tmpdirname, ignore_errors=True)
def test_save_load_pretrained_default(self):
tokenizer = self.get_tokenizer()
feature_extractor = self.get_feature_extractor()
processor = Speech2TextProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
processor.save_pretrained(self.tmpdirname)
processor = Speech2TextProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
self.assertIsInstance(processor.tokenizer, Speech2TextTokenizer)
self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
self.assertIsInstance(processor.feature_extractor, Speech2TextFeatureExtractor)
def test_save_load_pretrained_additional_features(self):
with tempfile.TemporaryDirectory() as tmpdir:
processor = Speech2TextProcessor(
tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor()
)
processor.save_pretrained(tmpdir)
tokenizer_add_kwargs = Speech2TextTokenizer.from_pretrained(tmpdir, bos_token="(BOS)", eos_token="(EOS)")
feature_extractor_add_kwargs = Speech2TextFeatureExtractor.from_pretrained(
tmpdir, do_normalize=False, padding_value=1.0
)
processor = Speech2TextProcessor.from_pretrained(
tmpdir, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
)
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer, Speech2TextTokenizer)
self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
self.assertIsInstance(processor.feature_extractor, Speech2TextFeatureExtractor)
def test_feature_extractor(self):
feature_extractor = self.get_feature_extractor()
tokenizer = self.get_tokenizer()
processor = Speech2TextProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
raw_speech = floats_list((3, 1000))
input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
input_processor = processor(raw_speech, return_tensors="np")
for key in input_feat_extract:
self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
def test_tokenizer(self):
feature_extractor = self.get_feature_extractor()
tokenizer = self.get_tokenizer()
processor = Speech2TextProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
input_str = "This is a test string"
encoded_processor = processor(text=input_str)
encoded_tok = tokenizer(input_str)
for key in encoded_tok:
self.assertListEqual(encoded_tok[key], encoded_processor[key])
def test_tokenizer_decode(self):
feature_extractor = self.get_feature_extractor()
tokenizer = self.get_tokenizer()
processor = Speech2TextProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
decoded_processor = processor.batch_decode(predicted_ids)
decoded_tok = tokenizer.batch_decode(predicted_ids)
self.assertListEqual(decoded_tok, decoded_processor)
| Speech2TextProcessorTest |
python | mlflow__mlflow | mlflow/server/job_api.py | {
"start": 377,
"end": 1497
} | class ____(BaseModel):
"""
Pydantic model for job query response.
"""
job_id: str
creation_time: int
function_fullname: str
params: dict[str, Any]
timeout: float | None
status: JobStatus
result: Any
retry_count: int
last_update_time: int
@classmethod
def from_job_entity(cls, job: JobEntity) -> "Job":
return cls(
job_id=job.job_id,
creation_time=job.creation_time,
function_fullname=job.function_fullname,
params=json.loads(job.params),
timeout=job.timeout,
status=job.status,
result=job.parsed_result,
retry_count=job.retry_count,
last_update_time=job.last_update_time,
)
@job_api_router.get("/{job_id}", response_model=Job)
def get_job(job_id: str) -> Job:
from mlflow.server.jobs import get_job
try:
job = get_job(job_id)
return Job.from_job_entity(job)
except MlflowException as e:
raise HTTPException(
status_code=e.get_http_status_code(),
detail=e.message,
)
| Job |
python | django__django | tests/or_lookups/models.py | {
"start": 308,
"end": 525
} | class ____(models.Model):
headline = models.CharField(max_length=50)
pub_date = models.DateTimeField()
class Meta:
ordering = ("pub_date",)
def __str__(self):
return self.headline
| Article |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/hooks/test_glue.py | {
"start": 23485,
"end": 30286
} | class ____:
RUN_ID = "1234"
RULE_SET_NAME = "test_rule"
RULE_SET_CONFIG = {
"Name": "test_rule",
"Ruleset": 'Rules=[ColumnLength "review_id" = 15]',
"TargetTable": {"DatabaseName": "test_db", "TableName": "test_table"},
"Description": "test rule",
}
def setup_method(self):
self.glue = GlueDataQualityHook()
def test_glue_data_quality_hook(self):
glue_data_quality_hook = GlueDataQualityHook()
assert glue_data_quality_hook.conn is not None
assert glue_data_quality_hook.aws_conn_id == "aws_default"
@mock.patch.object(GlueDataQualityHook, "conn")
def test_data_quality_ruleset_exists(self, mock_conn):
mock_conn.get_data_quality_ruleset.return_value = {"Name": self.RULE_SET_NAME}
result = self.glue.has_data_quality_ruleset(name=self.RULE_SET_NAME)
assert result is True
mock_conn.get_data_quality_ruleset.assert_called_once_with(Name=self.RULE_SET_NAME)
@mock.patch.object(GlueDataQualityHook, "conn")
def test_quality_ruleset_doesnt_exists(self, mock_conn):
error_message = f"Cannot find Data Quality Ruleset in account 1234567 with name {self.RULE_SET_NAME}"
err_response = {"Error": {"Code": "EntityNotFoundException", "Message": error_message}}
exception = boto3.client("glue").exceptions.ClientError(err_response, "test")
returned_exception = type(exception)
mock_conn.exceptions.EntityNotFoundException = returned_exception
mock_conn.get_data_quality_ruleset.side_effect = exception
result = self.glue.has_data_quality_ruleset(name=self.RULE_SET_NAME)
assert result is False
mock_conn.get_data_quality_ruleset.assert_called_once_with(Name=self.RULE_SET_NAME)
@mock.patch.object(AwsBaseHook, "conn")
def test_validate_evaluation_results(self, mock_conn, caplog):
response_evaluation_run = {"RunId": self.RUN_ID, "ResultIds": ["resultId1"]}
response_batch_result = {
"RunId": self.RUN_ID,
"ResultIds": ["resultId1"],
"Results": [
{
"ResultId": "resultId1",
"RulesetName": "rulesetOne",
"RuleResults": [
{
"Name": "Rule_1",
"Description": "RowCount between 150000 and 600000",
"EvaluatedMetrics": {"Dataset.*.RowCount": 300000.0},
"Result": "PASS",
}
],
}
],
}
mock_conn.get_data_quality_ruleset_evaluation_run.return_value = response_evaluation_run
mock_conn.batch_get_data_quality_result.return_value = response_batch_result
with caplog.at_level(logging.INFO, logger=self.glue.log.name):
caplog.clear()
self.glue.validate_evaluation_run_results(evaluation_run_id=self.RUN_ID, show_results=False)
mock_conn.get_data_quality_ruleset_evaluation_run.assert_called_once_with(RunId=self.RUN_ID)
mock_conn.batch_get_data_quality_result.assert_called_once_with(
ResultIds=response_evaluation_run["ResultIds"]
)
assert caplog.messages == [
"AWS Glue data quality ruleset evaluation run, total number of rules failed: 0"
]
@mock.patch.object(AwsBaseHook, "conn")
def test_validate_evaluation_results_should_fail_when_any_rules_failed(self, mock_conn, caplog):
response_batch_result = {
"RunId": self.RUN_ID,
"ResultIds": ["resultId1"],
"Results": [
{
"ResultId": "resultId1",
"RulesetName": "rulesetOne",
"RuleResults": [
{
"Name": "Rule_1",
"Description": "RowCount between 150000 and 600000",
"EvaluatedMetrics": {"Dataset.*.RowCount": 300000.0},
"Result": "PASS",
},
{
"Name": "Rule_2",
"Description": "ColumnLength 'marketplace' between 1 and 2",
"EvaluationMessage": "Value: 9.0 does not meet the constraint requirement!",
"Result": "FAIL",
"EvaluatedMetrics": {
"Column.marketplace.MaximumLength": 9.0,
"Column.marketplace.MinimumLength": 2.0,
},
},
],
}
],
}
response_evaluation_run = {"RunId": self.RUN_ID, "ResultIds": ["resultId1"]}
mock_conn.get_data_quality_ruleset_evaluation_run.return_value = response_evaluation_run
mock_conn.batch_get_data_quality_result.return_value = response_batch_result
with caplog.at_level(logging.INFO, logger=self.glue.log.name):
caplog.clear()
with pytest.raises(
AirflowException,
match="AWS Glue data quality ruleset evaluation run failed for one or more rules",
):
self.glue.validate_evaluation_run_results(evaluation_run_id=self.RUN_ID, show_results=False)
mock_conn.get_data_quality_ruleset_evaluation_run.assert_called_once_with(RunId=self.RUN_ID)
mock_conn.batch_get_data_quality_result.assert_called_once_with(
ResultIds=response_evaluation_run["ResultIds"]
)
assert caplog.messages == [
"AWS Glue data quality ruleset evaluation run, total number of rules failed: 1"
]
@mock.patch.object(GlueDataQualityHook, "conn")
def test_log_recommendation_results(self, glue_data_quality_hook_mock_conn, caplog):
rules = """ Rules = [
RowCount between 2 and 8,
IsComplete "name"
]
"""
glue_data_quality_hook_mock_conn.get_data_quality_rule_recommendation_run.return_value = {
"RunId": self.RUN_ID,
"DataSource": {"GlueTable": {"DatabaseName": "TestDB", "TableName": "TestTable"}},
"RecommendedRuleset": rules,
}
with caplog.at_level(logging.INFO, logger=self.glue.log.name):
self.glue.log_recommendation_results(run_id=self.RUN_ID)
glue_data_quality_hook_mock_conn.get_data_quality_rule_recommendation_run.assert_called_once_with(
RunId=self.RUN_ID
)
assert rules in caplog.messages
| TestGlueDataQualityHook |
python | huggingface__transformers | src/transformers/models/rwkv/modeling_rwkv.py | {
"start": 28090,
"end": 33191
} | class ____(RwkvPreTrainedModel, GenerationMixin):
_tied_weights_keys = {"head.weight": "rwkv.embeddings.weight"}
def __init__(self, config):
super().__init__(config)
self.rwkv = RwkvModel(config)
self.head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
# Initialize weights and apply final processing
self.post_init()
def get_output_embeddings(self):
return self.head
def set_output_embeddings(self, new_embeddings):
self.head = new_embeddings
def prepare_inputs_for_generation(self, input_ids, state=None, inputs_embeds=None, use_cache=None, **kwargs):
# Overwritten -- this model uses `state`, but doesn't have a cache (`past_key_values`)
# only last token for inputs_ids if the state is passed along.
if state is not None:
input_ids = input_ids[:, -1].unsqueeze(-1)
# if `inputs_embeds` are passed, we only want to use them in the 1st generation step
if inputs_embeds is not None and state is None:
model_inputs = {"inputs_embeds": inputs_embeds}
else:
model_inputs = {"input_ids": input_ids}
model_inputs["state"] = state
model_inputs["use_cache"] = use_cache
# Forward ALL kwargs that are uninitialized (e.g. `use_cache`).
for key, value in kwargs.items():
if key not in model_inputs:
model_inputs[key] = value
return model_inputs
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
state: Optional[list[torch.FloatTensor]] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
**kwargs,
) -> Union[tuple, RwkvCausalLMOutput]:
r"""
input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`):
`input_ids_length` = `sequence_length` if `past_key_values` is `None` else
`past_key_values.get_seq_length()` (`sequence_length` of input past key value states). Indices of input
sequence tokens in the vocabulary.
If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as
`input_ids`.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
state (tuple of five `torch.FloatTensor` of shape `(batch_size, hidden_size, num_hidden_layers)`, *optional*):
If passed along, the model uses the previous state in all the blocks (which will give the output for the
`input_ids` provided as if the model added `state_input_ids + input_ids` as context).
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
`labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100`
are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]`
use_cache (`bool`, *optional*):
If set to `True`, the last state is returned and can be used to quickly generate the next logits.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
rwkv_outputs = self.rwkv(
input_ids,
inputs_embeds=inputs_embeds,
state=state,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = rwkv_outputs[0]
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)
if not return_dict:
output = (logits,) + rwkv_outputs[1:]
return ((loss,) + output) if loss is not None else output
return RwkvCausalLMOutput(
loss=loss,
logits=logits,
state=rwkv_outputs.state,
hidden_states=rwkv_outputs.hidden_states,
attentions=rwkv_outputs.attentions,
)
__all__ = ["RwkvForCausalLM", "RwkvModel", "RwkvPreTrainedModel"]
| RwkvForCausalLM |
python | falconry__falcon | tests/asgi/test_scope.py | {
"start": 193,
"end": 7126
} | class ____:
def items(self):
return [('foo', 'bar')]
def test_missing_asgi_version():
scope = testing.create_scope()
del scope['asgi']
resource = _call_with_scope(scope)
# NOTE(kgriffs): According to the ASGI spec, the version should
# default to "2.0".
assert resource.captured_req.scope['asgi']['version'] == '2.0'
@pytest.mark.parametrize('http_version', ['0.9', '1.9', '4.0', '1337'])
def test_unsupported_http_version(http_version):
scope = testing.create_scope()
scope['http_version'] = http_version
with pytest.raises(UnsupportedError):
_call_with_scope(scope)
@pytest.mark.parametrize(
'version, supported',
[
('3.0', True),
('3.1', True),
('3.10', True),
('30.0', False),
('31.0', False),
('4.0', False),
('4.1', False),
('4.10', False),
('40.0', False),
('41.0', False),
('2.0', False),
('2.1', False),
('2.10', False),
(None, False),
],
)
def test_supported_asgi_version(version, supported):
scope = {
'type': 'lifespan',
'asgi': {'spec_version': '2.0', 'version': version},
}
if version is None:
del scope['asgi']['version']
app = App()
resource = testing.SimpleTestResourceAsync()
app.add_route('/', resource)
shutting_down = asyncio.Condition()
req_event_emitter = testing.ASGILifespanEventEmitter(shutting_down)
resp_event_collector = testing.ASGIResponseEventCollector()
async def task():
coro = asyncio.create_task(app(scope, req_event_emitter, resp_event_collector))
# NOTE(vytas): Yield to the lifespan task above.
await asyncio.sleep(0)
assert len(resp_event_collector.events) == 1
event = resp_event_collector.events[0]
if supported:
assert event['type'] == 'lifespan.startup.complete'
else:
assert event['type'] == 'lifespan.startup.failed'
assert event['message'].startswith('Falcon requires ASGI version 3.x')
async with shutting_down:
shutting_down.notify()
await coro
falcon.async_to_sync(task)
@pytest.mark.parametrize('scope_type', ['tubes', 'http3', 'htt'])
def test_unsupported_scope_type(scope_type):
scope = testing.create_scope()
scope['type'] = scope_type
with pytest.raises(UnsupportedScopeError):
_call_with_scope(scope)
@pytest.mark.parametrize(
'spec_version, supported',
[
('0.0', False),
('1.0', False),
('11.0', False),
('2.0', True),
('2.1', True),
('2.10', True),
('20.0', False),
('22.0', False),
('3.0', False),
('3.1', False),
('30.0', False),
],
)
def test_supported_http_spec(spec_version, supported):
scope = testing.create_scope()
scope['asgi']['spec_version'] = spec_version
if supported:
_call_with_scope(scope)
else:
with pytest.raises(UnsupportedScopeError):
_call_with_scope(scope)
def test_lifespan_scope_default_version():
app = App()
resource = testing.SimpleTestResourceAsync()
app.add_route('/', resource)
shutting_down = asyncio.Condition()
req_event_emitter = testing.ASGILifespanEventEmitter(shutting_down)
resp_event_collector = testing.ASGIResponseEventCollector()
scope = {'type': 'lifespan'}
async def t():
t = asyncio.create_task(app(scope, req_event_emitter, resp_event_collector))
# NOTE(kgriffs): Yield to the lifespan task above
await asyncio.sleep(0.001)
async with shutting_down:
shutting_down.notify()
await t
falcon.async_to_sync(t)
assert not resource.called
@pytest.mark.parametrize(
'spec_version, supported',
[
('0.0', False),
('1.0', True),
('1.1', True),
('1.10', True),
('2.0', True),
('2.1', True),
('2.10', True),
('3.0', False),
('4.0', False),
('11.0', False),
('22.0', False),
],
)
def test_lifespan_scope_version(spec_version, supported):
app = App()
shutting_down = asyncio.Condition()
req_event_emitter = testing.ASGILifespanEventEmitter(shutting_down)
resp_event_collector = testing.ASGIResponseEventCollector()
scope = {
'type': 'lifespan',
'asgi': {'spec_version': spec_version, 'version': '3.0'},
}
if not supported:
with pytest.raises(UnsupportedScopeError):
falcon.async_to_sync(
app.__call__, scope, req_event_emitter, resp_event_collector
)
return
async def t():
t = asyncio.create_task(app(scope, req_event_emitter, resp_event_collector))
# NOTE(kgriffs): Yield to the lifespan task above
await asyncio.sleep(0.001)
async with shutting_down:
shutting_down.notify()
await t
falcon.async_to_sync(t)
def test_query_string_values():
with pytest.raises(ValueError):
testing.create_scope(query_string='?catsup=y')
with pytest.raises(ValueError):
testing.create_scope(query_string='?')
for qs in ('', None):
scope = testing.create_scope(query_string=qs)
assert scope['query_string'] == b''
resource = _call_with_scope(scope)
assert resource.captured_req.query_string == ''
qs = 'a=1&b=2&c=%3E%20%3C'
scope = testing.create_scope(query_string=qs)
assert scope['query_string'] == qs.encode()
resource = _call_with_scope(scope)
assert resource.captured_req.query_string == qs
@pytest.mark.parametrize(
'scheme, valid',
[
('http', True),
('https', True),
('htt', False),
('http:', False),
('https:', False),
('ftp', False),
('gopher', False),
],
)
def test_scheme(scheme, valid):
if valid:
testing.create_scope(scheme=scheme)
else:
with pytest.raises(ValueError):
testing.create_scope(scheme=scheme)
@pytest.mark.parametrize('cookies', [{'foo': 'bar', 'baz': 'foo'}, CustomCookies()])
def test_cookies(cookies):
scope = testing.create_scope(cookies=cookies)
assert any(header == b'cookie' for header, _ in scope['headers'])
def test_cookies_options_method():
scope = testing.create_scope(method='OPTIONS', cookies={'foo': 'bar'})
assert not any(header == b'cookie' for header, _ in scope['headers'])
def _call_with_scope(scope):
app = App()
resource = testing.SimpleTestResourceAsync()
app.add_route('/', resource)
req_event_emitter = testing.ASGIRequestEventEmitter()
resp_event_collector = testing.ASGIResponseEventCollector()
falcon.async_to_sync(app.__call__, scope, req_event_emitter, resp_event_collector)
assert resource.called
return resource
| CustomCookies |
python | huggingface__transformers | tests/models/fsmt/test_modeling_fsmt.py | {
"start": 1580,
"end": 5094
} | class ____:
def __init__(
self,
parent,
src_vocab_size=99,
tgt_vocab_size=99,
langs=["ru", "en"],
batch_size=13,
seq_length=7,
is_training=False,
use_labels=False,
hidden_size=16,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=4,
hidden_act="relu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=20,
bos_token_id=0,
pad_token_id=1,
eos_token_id=2,
):
self.parent = parent
self.src_vocab_size = src_vocab_size
self.tgt_vocab_size = tgt_vocab_size
self.langs = langs
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_labels = use_labels
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.bos_token_id = bos_token_id
self.pad_token_id = pad_token_id
self.eos_token_id = eos_token_id
torch.manual_seed(0)
# hack needed for modeling_common tests - despite not really having this attribute in this model
self.vocab_size = self.src_vocab_size
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.src_vocab_size).clamp(
3,
)
input_ids[:, -1] = 2 # Eos Token
config = self.get_config()
inputs_dict = prepare_fsmt_inputs_dict(config, input_ids)
return config, inputs_dict
def get_config(self):
return FSMTConfig(
vocab_size=self.src_vocab_size, # hack needed for common tests
src_vocab_size=self.src_vocab_size,
tgt_vocab_size=self.tgt_vocab_size,
langs=self.langs,
d_model=self.hidden_size,
encoder_layers=self.num_hidden_layers,
decoder_layers=self.num_hidden_layers,
encoder_attention_heads=self.num_attention_heads,
decoder_attention_heads=self.num_attention_heads,
encoder_ffn_dim=self.intermediate_size,
decoder_ffn_dim=self.intermediate_size,
dropout=self.hidden_dropout_prob,
attention_dropout=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
eos_token_id=self.eos_token_id,
bos_token_id=self.bos_token_id,
pad_token_id=self.pad_token_id,
)
def prepare_config_and_inputs_for_common(self):
config, inputs_dict = self.prepare_config_and_inputs()
inputs_dict["decoder_input_ids"] = inputs_dict["input_ids"]
inputs_dict["decoder_attention_mask"] = inputs_dict["attention_mask"]
inputs_dict["use_cache"] = False
return config, inputs_dict
def prepare_fsmt_inputs_dict(
config,
input_ids,
attention_mask=None,
):
if attention_mask is None:
attention_mask = input_ids.ne(config.pad_token_id)
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
}
@require_torch
| FSMTModelTester |
python | PrefectHQ__prefect | src/integrations/prefect-snowflake/prefect_snowflake/experimental/workers/spcs.py | {
"start": 11798,
"end": 16412
} | class ____(BaseVariables):
"""Defines variables which can be overridden by deployments.
Must include all variables in SPCSWorkerConfiguration. All validation should happen in SPCSWorkerConfiguration.
"""
image: str = Field(
default_factory=get_prefect_image_name,
description="The image to use for the Prefect container in the task. This value defaults to a Prefect base image matching your local versions.",
examples=["docker.io/prefecthq/prefect:3-latest"],
)
image_registry: str | None = Field(
default=None,
description="The fully qualified name of the Snowflake image registry.",
)
entrypoint: str | None = Field(
default=DEFAULT_CONTAINER_ENTRYPOINT,
description=(
"The entrypoint of the container you wish to run. "
"This value defaults to the entrypoint used by Prefect images and should only be changed when using a custom image that is not based on an official Prefect image."
),
)
snowflake_credentials: SnowflakeCredentials = Field(
default_factory=SnowflakeCredentials,
description="Snowflake credentials to use when creating job services.",
)
secrets: list[dict[str, str]] = Field(
default_factory=list,
description="Snowflake secrets to inject into the container as env variables or files.",
)
external_access_integrations: list[str] = Field(
default_factory=list,
description="The names of the external access integrations that the service should be created with.",
)
compute_pool: str | None = Field(
default=None,
description="The fully-qualified name of the compute pool to run services in.",
examples=["common.compute.my_compute_pool"],
)
cpu_request: str = Field(
default=SPCS_DEFAULT_CPU_REQUEST,
description=f"CPU allocation request for the job service. If not provided, a default value of {SPCS_DEFAULT_CPU_REQUEST} will be used.",
)
cpu_limit: str | None = Field(
default=SPCS_DEFAULT_CPU_LIMIT,
description="CPU allocation limit for the job service. If not provided, there will be no limit.",
)
gpu_count: int | None = Field(
default=None,
description="The number of GPUs to use. If not provided, no GPUs will be used.",
)
memory_request: str = Field(
default=SPCS_DEFAULT_MEMORY_REQUEST,
description=(
"Memory allocation request for the job service. "
f"If not provided, a default value of {SPCS_DEFAULT_MEMORY_REQUEST} will be used unless present on the task definition."
),
)
memory_limit: str | None = Field(
default=SPCS_DEFAULT_MEMORY_LIMIT,
description="Memory allocation limit for the job service. If not provided, defaults to the same value as memory_request.",
)
volume_mounts: list[dict[str, str]] = Field(
default_factory=list,
description="Defines where volumes appears in the container.",
)
volumes: list[dict[str, Any]] = Field(
default_factory=list,
description="Shared file system that can be made available in your container.",
)
log_level: Literal["INFO", "ERROR", "NONE"] = Field(
default="INFO",
description="The level of application logs that Snowflake should collect and export to your event table.",
)
metrics_groups: list[str] = Field(
default_factory=list,
description="The list of predefined metrics groups that should be logged to the event table.",
)
query_warehouse: str | None = Field(
default=None, description="The query warehouse of the job service."
)
service_comment: str | None = Field(
default=None,
description="Specify a comment for the job service. Visible in certain Snowflake logs.",
)
stream_output: bool = Field(
default=False,
description="Direct flow log output back to the worker's console.",
)
pool_start_timeout_seconds: int = Field(
default=600,
description="The number of seconds to wait for the compute pool to start before considering the run failed.",
)
service_start_timeout_seconds: int = Field(
default=300,
description="The number of seconds to wait for the job service to start before considering the run failed.",
)
service_watch_poll_interval: int = Field(
default=5,
description="The number of seconds to wait between Snowflake API calls while monitoring the state of the service.",
)
| SPCSServiceTemplateVariables |
python | django__django | tests/gis_tests/gis_migrations/test_commands.py | {
"start": 127,
"end": 2491
} | class ____(TransactionTestCase):
"""
Tests running the migrate command in GeoDjango.
"""
available_apps = ["gis_tests.gis_migrations"]
def get_table_description(self, table):
with connection.cursor() as cursor:
return connection.introspection.get_table_description(cursor, table)
def assertTableExists(self, table):
with connection.cursor() as cursor:
self.assertIn(table, connection.introspection.table_names(cursor))
def assertTableNotExists(self, table):
with connection.cursor() as cursor:
self.assertNotIn(table, connection.introspection.table_names(cursor))
def test_migrate_gis(self):
"""
Tests basic usage of the migrate command when a model uses GeoDjango
fields (#22001).
It's also used to showcase an error in migrations where spatialite is
enabled and geo tables are renamed resulting in unique constraint
failure on geometry_columns (#23030).
"""
# The right tables exist
self.assertTableExists("gis_migrations_neighborhood")
self.assertTableExists("gis_migrations_household")
self.assertTableExists("gis_migrations_family")
if connection.features.supports_raster:
self.assertTableExists("gis_migrations_heatmap")
# Unmigrate models.
call_command("migrate", "gis_migrations", "0001", verbosity=0)
# All tables are gone
self.assertTableNotExists("gis_migrations_neighborhood")
self.assertTableNotExists("gis_migrations_household")
self.assertTableNotExists("gis_migrations_family")
if connection.features.supports_raster:
self.assertTableNotExists("gis_migrations_heatmap")
# Even geometry columns metadata
try:
GeoColumn = connection.ops.geometry_columns()
except NotImplementedError:
# Not all GIS backends have geometry columns model
pass
else:
qs = GeoColumn.objects.filter(
**{
"%s__in"
% GeoColumn.table_name_col(): ["gis_neighborhood", "gis_household"]
}
)
self.assertEqual(qs.count(), 0)
# Revert the "unmigration"
call_command("migrate", "gis_migrations", verbosity=0)
| MigrateTests |
python | dagster-io__dagster | docs/sphinx/_ext/sphinx-mdx-builder/sphinxcontrib/mdxbuilder/writers/mdx.py | {
"start": 4333,
"end": 43057
} | class ____(SphinxTranslator):
def __init__(self, document: nodes.document, builder: "MdxBuilder") -> None:
super().__init__(document, builder)
self.sectionlevel = 0
self.nl = "\n"
self.messages: list[str] = []
self._warned: set[str] = set()
self.states: list[list[tuple[int, str | list[str]]]] = [[]]
self.stateindent = [0]
self.context: list[str] = []
self.list_counter: list[int] = []
self.in_literal = 0
self.in_literal_block = 0
self.in_list_item = 0
self.desc_count = 0
self.max_line_width = self.config.mdx_max_line_width or 120
self.github_url = (
self.config.mdx_github_url or "https://github.com/dagster-io/dagster/blob/master"
)
self.show_source_links = getattr(self.config, "mdx_show_source_links", True)
self.special_characters = {
ord("<"): "<",
ord('"'): """,
ord(">"): ">",
}
def _unwrap_function_object(self, obj):
"""Attempt to unwrap function-like objects to get the underlying callable.
This method handles various common patterns used by decorators and frameworks
that wrap functions in custom objects while preserving access to the original function.
Args:
obj: The object to unwrap
Returns:
The unwrapped function if found, otherwise the original object
"""
# Common patterns for accessing wrapped functions
function_attributes = [
# Standard functools.wraps pattern (already handled above via __wrapped__)
"__wrapped__",
# Common patterns in various frameworks:
"func", # Used by many decorators
"function", # Alternative naming
"__func__", # Method objects
"fget", # Property objects
"__call__", # Callable objects (last resort)
# Generic function storage patterns:
"decorated", # Generic decorated function access
"inner", # Generic inner function access
"wrapped", # Alternative to __wrapped__
"_func", # Private function storage
"_fn", # Private function storage (short form)
"callback", # Callback-style wrappers
"handler", # Handler-style wrappers
"target", # Target function in wrappers
"original", # Original function reference
"impl", # Implementation function
"implementation", # Full implementation name
# Common framework patterns (type_fn suffix pattern):
"fn", # Simple fn suffix
"logger_fn", # Any logger-type function
"main_fn", # Main function reference
"exec_fn", # Execution function
"run_fn", # Run function
"call_fn", # Call function
]
current_obj = obj
# Try each attribute pattern
for attr_name in function_attributes:
if hasattr(current_obj, attr_name):
potential_func = getattr(current_obj, attr_name)
# Verify it's actually a callable and has source code
if callable(potential_func):
try:
# Test if we can get source info from this object
if inspect.getsourcefile(potential_func):
current_obj = potential_func
break
except (TypeError, OSError):
# If this attribute doesn't have source info, continue to next
continue
# Handle callable objects that might contain the function logic
# but only if we haven't found a better candidate
if current_obj is obj and callable(obj) and not inspect.isfunction(obj):
# For callable objects, try to access their __call__ method's source
# This is a last resort and might not always work
if hasattr(obj, "__call__") and hasattr(obj.__call__, "__func__"):
try:
call_method = obj.__call__.__func__
if inspect.getsourcefile(call_method):
current_obj = call_method
except (TypeError, OSError):
pass
return current_obj
def _find_dagster_repo_root(self, source_file: str) -> Optional[str]:
"""Find the Dagster repository root by looking for python_modules directory.
Args:
source_file: Path to the source file
Returns:
Path to the repository root or None if not found
"""
current_dir = os.path.dirname(os.path.abspath(source_file))
while current_dir and current_dir != os.path.dirname(current_dir):
# Check if this directory contains python_modules
python_modules_path = os.path.join(current_dir, "python_modules")
if os.path.isdir(python_modules_path):
# Additional validation: check for dagster package within python_modules
dagster_path = os.path.join(python_modules_path, "dagster")
if os.path.isdir(dagster_path):
return current_dir
# Move up one directory
current_dir = os.path.dirname(current_dir)
return None
############################################################
# Utility and State Methods
############################################################
def add_text(self, text: str) -> None:
self.states[-1].append((-1, text))
def get_source_github_url(self, objname: str, modname: str, fullname: str) -> Optional[str]:
"""Generate a GitHub URL for a Python object.
Args:
objname: Name of the object
modname: Module name
fullname: Full qualified name
Returns:
A URL to the GitHub source or None if not available
"""
if not self.show_source_links or not modname:
return None
try:
module = __import__(modname, fromlist=[""])
obj = module
parts = fullname.split(".")
# Navigate to the actual object
for part in parts:
if hasattr(obj, part):
obj = getattr(obj, part)
if not obj:
logger.warning(f"No object for {fullname}")
return None
# Don't unwrap enum classes as they should point to their definition
from enum import Enum
if not (isinstance(obj, type) and issubclass(obj, Enum)):
# unwrap the root function if function is wrapped
while hasattr(obj, "__wrapped__"):
obj = obj.__wrapped__
# Handle various patterns of function-wrapping objects
obj = self._unwrap_function_object(obj)
try:
source_file = inspect.getsourcefile(obj)
if not source_file:
logger.warning(f"No source file for {fullname}")
return None
# Find the repository root by looking for python_modules directory
repo_root = self._find_dagster_repo_root(source_file)
if repo_root:
# Calculate relative path from repository root
repo_path = os.path.relpath(source_file, repo_root)
else:
# Fallback to original behavior if repo root not found
repo_path = os.path.relpath(source_file).replace("../", "")
source_line = inspect.getsourcelines(obj)[1]
return f"{self.github_url}/{repo_path}#L{source_line}"
except (TypeError, OSError):
return None
except Exception as e:
logger.debug(f"Error generating source link for {fullname}: {e}")
return None
def new_state(self, indent: int = STDINDENT) -> None:
self.states.append([])
self.stateindent.append(indent)
def log_visit(self, node: Element | str) -> None:
"""Utility to log the visit to a node."""
if isinstance(node, Element):
node_type = node.__class__.__name__
else:
node_type = node
self.add_text(f"---------visit: {node_type}")
def log_depart(self, node: Element | str) -> None:
if isinstance(node, Element):
node_type = node.__class__.__name__
else:
node_type = node
self.add_text(f"---------depart: {node_type}")
def attval(self, text, whitespace=re.compile("[\n\r\t\v\f]")):
"""Cleanse, HTML encode, and return attribute value text."""
encoded = self.encode(whitespace.sub(" ", text))
return encoded
def encode(self, text):
"""Encode special characters in `text` & return."""
# Use only named entities known in both XML and HTML
# other characters are automatically encoded "by number" if required.
# @@@ A codec to do these and all other HTML entities would be nice.
text = str(text)
return text.translate(self.special_characters)
# From: https://github.com/docutils/docutils/blob/master/docutils/docutils/writers/_html_base.py
def starttag(self, node, tagname, suffix="\n", empty=False, **attributes):
"""Construct and return a start tag given a node (id & class attributes
are extracted), tag name, and optional attributes.
"""
tagname = tagname.lower()
prefix = []
atts = {}
for name, value in attributes.items():
atts[name.lower()] = value
classes = atts.pop("classes", [])
languages = []
# unify class arguments and move language specification
for cls in node.get("classes", []) + atts.pop("class", "").split():
if cls.startswith("language-"):
languages.append(cls.removeprefix("language-"))
elif cls.strip() and cls not in classes:
classes.append(cls)
if languages:
# attribute name is 'lang' in XHTML 1.0 but 'xml:lang' in 1.1
atts[self.lang_attribute] = languages[0]
# filter classes that are processed by the writer:
internal = ("colwidths-auto", "colwidths-given", "colwidths-grid")
if isinstance(node, nodes.table):
classes = [cls for cls in classes if cls not in internal]
if classes:
atts["class"] = " ".join(classes)
assert "id" not in atts
ids = node.get("ids", [])
ids.extend(atts.pop("ids", []))
if ids:
atts["id"] = ids[0]
for id in ids[1:]:
# Add empty "span" elements for additional IDs. Note
# that we cannot use empty "a" elements because there
# may be targets inside of references, but nested "a"
# elements aren't allowed in XHTML (even if they do
# not all have a "href" attribute).
if empty or isinstance(node, (nodes.Sequential, nodes.docinfo, nodes.table)):
# Insert target right in front of element.
prefix.append(f'<Link id="{id}"></Link>')
else:
# Non-empty tag. Place the auxiliary <span> tag
# *inside* the element, as the first child.
suffix += f'<Link id="{id}"></Link>'
attlist = sorted(atts.items())
parts = [tagname]
for name, value in attlist:
# value=None was used for boolean attributes without
# value, but this isn't supported by XHTML.
assert value is not None
if isinstance(value, list):
values = [str(v) for v in value]
parts.append('{}="{}"'.format(name.lower(), self.attval(" ".join(values))))
else:
parts.append(f'{name.lower()}="{self.attval(str(value))}"')
if empty:
infix = " /"
else:
infix = ""
return "".join(prefix) + "<{}{}>".format(" ".join(parts), infix) + suffix
def end_state(
self,
wrap: bool = True,
end: Sequence[str] | None = ("",),
first: str | None = None,
) -> None:
if len(self.stateindent) == 0:
self.stateindent = [0]
content = self.states.pop()
maxindent = sum(self.stateindent)
indent = self.stateindent.pop()
result: list[tuple[int, list[str]]] = []
toformat: list[str] = []
def do_format() -> None:
if not toformat:
return
if wrap:
res = my_wrap("".join(toformat), width=self.max_line_width - maxindent)
else:
res = "".join(toformat).splitlines()
if end:
res += end
result.append((indent, res))
for itemindent, item in content:
if itemindent == -1:
toformat.append(item) # type: ignore[arg-type]
else:
do_format()
result.append((indent + itemindent, item)) # type: ignore[arg-type]
toformat = []
do_format()
if first is not None and result:
# insert prefix into first line (ex. *, [1], See also, etc.)
newindent = result[0][0] - indent
if result[0][1] == [""]:
result.insert(0, (newindent, [first]))
else:
text = first + result[0][1].pop(0)
result.insert(0, (newindent, [text]))
if len(self.states) >= 1:
self.states[-1].extend(result)
else:
self.states.append([])
self.states[-1].extend(result)
def unknown_visit(self, node: Element) -> None:
node_type = node.__class__.__name__
if node_type not in self._warned:
super().unknown_visit(node)
self._warned.add(node_type)
raise nodes.SkipNode
def visit_Text(self, node: nodes.Text) -> None:
if isinstance(node.parent, nodes.reference):
return
content = node.astext()
# Prevents wrapped lines in list items, for example in parameter descriptions:
#
# Args:
# group_name (Optional[str], optional): The name of the asset group.
# some_translator (Optional[SomeTranslator], optional): The translator to use
# to convert content into :py:class:`some.Spec`.
# Defaults to :py:class:`SomeTranslator`.
if self.in_list_item:
content = content.replace("\n", " ")
if self.in_literal and not self.in_literal_block:
content = content.replace("<", "\\<").replace("{", "\\{")
self.add_text(content)
def depart_Text(self, node: Element) -> None:
pass
def visit_document(self, node: Element) -> None:
self.new_state(0)
def depart_document(self, node: Element) -> None:
title = next(iter(node.nameids.keys()), "Dagster Python API Reference")
title_suffix = self.builder.config.mdx_title_suffix
title_meta = self.builder.config.mdx_title_meta
meta_description = self.builder.config.mdx_description_meta
# Display index files at the top of their sections
if "index.rst" in node.attributes["source"]:
sidebar_position = True
else:
sidebar_position = self.builder.config.mdx_sidebar_position
# Escape single quotes in strings
title = title.replace("'", "\\'")
if title_suffix:
title_suffix = title_suffix.replace("'", "\\'")
if title_meta:
title_meta = title_meta.replace("'", "\\'")
if meta_description:
meta_description = meta_description.replace("'", "\\'")
frontmatter = "---\n"
frontmatter += f"title: '{title}"
if title_suffix:
frontmatter += f" {title_suffix}"
frontmatter += "'\n"
if sidebar_position:
frontmatter += "sidebar_position: 1\n"
if title_meta:
frontmatter += f"title_meta: '{title}{title_meta}'\n"
if meta_description:
frontmatter += f"description: '{title}{meta_description}'\n"
last_update = datetime.now().strftime("%Y-%m-%d")
frontmatter += "last_update:\n"
frontmatter += f" date: '{last_update}'\n"
# prevent API docs from having `Edit this page` link; in the future we may want to configure this to point to the `.rst` file
frontmatter += "custom_edit_url: null\n"
frontmatter += "---\n\n"
self.end_state()
self.body = frontmatter
self.body += self.nl.join(
line and (" " * indent + line) for indent, lines in self.states[0] for line in lines
)
if self.messages:
logger.info("---MDX Translator messages---")
for msg in self.messages:
logger.info(msg)
logger.info("---End MDX Translator messages---")
def visit_section(self, node: Element) -> None:
self.end_state(wrap=False, end="\n")
self.new_state(0)
self.sectionlevel += 1
self.add_text(self.starttag(node, "div", CLASS="section"))
def depart_section(self, node: Element) -> None:
self.sectionlevel -= 1
self.add_text("</div>")
def visit_topic(self, node: Element) -> None:
self.new_state(0)
def depart_topic(self, node: Element) -> None:
self.end_state(wrap=False)
visit_sidebar = visit_topic
depart_sidebar = depart_topic
def visit_rubric(self, node: Element) -> None:
# Add blank line before rubrics to separate sections (e.g., between Returns and Examples)
self.add_text(self.nl)
self.new_state(0)
def depart_rubric(self, node: Element) -> None:
self.add_text(":")
self.end_state()
def visit_compound(self, node: Element) -> None:
pass
def depart_compound(self, node: Element) -> None:
pass
def visit_glossary(self, node: Element) -> None:
pass
def depart_glossary(self, node: Element) -> None:
pass
def visit_title(self, node: Element) -> None:
if isinstance(node.parent, nodes.Admonition):
self.add_text(node.astext() + ": ")
raise nodes.SkipNode
self.new_state(0)
def depart_title(self, node: Element) -> None:
prefix = "#" * (self.sectionlevel) + " "
self.end_state(first=prefix)
def visit_subtitle(self, node: Element) -> None:
pass
def depart_subtitle(self, node: Element) -> None:
pass
def visit_attribution(self, node: Element) -> None:
pass
def depart_attribution(self, node: Element) -> None:
pass
#############################################################
# Domain-specific object descriptions
#############################################################
# Top-level nodes
#################
# desc contains 1* desc_signature and a desc_content
# desc_signature default single line signature
# desc_signature_line node for line in multi-line signature
# desc_content last child node, object description
# desc_inline sig fragment in inline text
def visit_desc(self, node: Element) -> None:
self.in_literal += 1
self.desc_count += 1
self.new_state(0)
self.add_text("<dl>")
def depart_desc(self, node: Element) -> None:
self.in_literal -= 1
self.add_text("</dl>")
self.end_state(wrap=False, end=None)
self.desc_count -= 1
def visit_desc_signature(self, node: Element) -> None:
self.in_literal += 1
self.new_state()
ids = node.get("ids")
if ids:
self.add_text(f"<dt><Link class=\"anchor\" id='{ids[0]}'>")
else:
self.add_text("<dt>")
def depart_desc_signature(self, node: Element) -> None:
self.in_literal -= 1
ids = node.get("ids")
# Add source link if available
module = node.get("module")
fullname = node.get("fullname")
objname = node.get("objname", "")
if self.show_source_links and module and fullname:
github_url = self.get_source_github_url(objname, module, fullname)
if github_url:
self.add_text(
f" <a href='{github_url}' className='source-link' target='_blank' rel='noopener noreferrer'>[source]</a>"
)
if ids:
self.add_text(f'<a href="#{ids[0]}" class="hash-link"></a>')
self.add_text("</Link></dt>")
else:
self.add_text("</dt>")
self.end_state(wrap=False, end=None)
def visit_desc_signature_line(self, node: Element) -> None:
pass
def depart_desc_signature_line(self, node: Element) -> None:
pass
def visit_desc_content(self, node: Element) -> None:
self.in_literal += 1
self.new_state()
self.add_text("<dd>\n")
def depart_desc_content(self, node: Element) -> None:
self.in_literal -= 1
self.add_text("\n")
self.add_text("</dd>")
self.end_state(wrap=False)
def visit_desc_inline(self, node: Element) -> None:
self.add_text("<span>")
def depart_desc_inline(self, node: Element) -> None:
self.add_text("</span>")
def visit_desc_sig_space(self, node: Element) -> None:
pass
def depart_desc_sig_space(self, node: Element) -> None:
pass
# High-level structure in signatures
#################
# desc_name: main object name, e.g. MyModule.MyClass, the main name is MyClass.
# desc_addname: additional name, e.g. MyModule.MyClass, the additional name is MyModule
# desc_type: node for return types or object type names
# desc_returns: node for return types
# desc_parameterlist: node for parameter list
# desc_parameter: node for a single parameter
# desc_optional: node for optional parts of the param list
# desc_annotation: node for signature annotations
def visit_desc_name(self, node: Element) -> None:
pass
def depart_desc_name(self, node: Element) -> None:
pass
def visit_desc_addname(self, node: Element) -> None:
pass
def depart_desc_addname(self, node: Element) -> None:
pass
def visit_desc_type(self, node: Element) -> None:
pass
def depart_desc_type(self, node: Element) -> None:
pass
def visit_desc_returns(self, node: Element) -> None:
self.add_text(" -> ")
def depart_desc_returns(self, node: Element) -> None:
pass
def visit_desc_parameterlist(self, node: Element) -> None:
raise nodes.SkipNode
def depart_desc_parameterlist(self, node: Element) -> None:
pass
def visit_desc_type_parameterlist(self, node: Element) -> None:
pass
def depart_desc_type_parameterlist(self, node: Element) -> None:
pass
def visit_desc_parameter(self, node: Element) -> None:
pass
def depart_desc_parameter(self, node: Element) -> None:
pass
def visit_desc_type_parameter(self, node: Element) -> None:
pass
def depart_desc_type_parameter(self, node: Element) -> None:
pass
def visit_desc_optional(self, node: Element) -> None:
pass
def depart_desc_optional(self, node: Element) -> None:
pass
def visit_desc_annotation(self, node: Element) -> None:
pass
def depart_desc_annotation(self, node: Element) -> None:
pass
# Docutils nodes
###############
def visit_paragraph(self, node: Element) -> None:
if not (
isinstance(
node.parent,
(nodes.list_item, nodes.entry, addnodes.desc_content, nodes.field_body),
)
and (len(node.parent) == 1)
):
self.new_state(0)
def depart_paragraph(self, node: Element) -> None:
if not (
isinstance(
node.parent,
(nodes.list_item, nodes.entry, addnodes.desc_content, nodes.field_body),
)
and (len(node.parent) == 1)
):
self.end_state(wrap=False)
def visit_reference(self, node: Element) -> None:
if len(node.children) == 1 and isinstance(
node.children[0], (nodes.literal, addnodes.literal_emphasis)
):
# For references containing only a literal or literal_emphasis, use the literal text
ref_text = node.children[0].astext()
if "refuri" in node:
self.reference_uri = node["refuri"]
elif "refid" in node:
self.reference_uri = f"#{node['refid']}"
else:
self.messages.append('References must have "refuri" or "refid" attribute.')
raise nodes.SkipNode
# Use _emphasis for literal_emphasis nodes
if isinstance(node.children[0], addnodes.literal_emphasis):
self.add_text(f"[*{ref_text}*]({self.reference_uri})")
else:
self.add_text(f"[`{ref_text}`]({self.reference_uri})")
raise nodes.SkipNode
else:
# Handle regular references
ref_text = node.astext()
if "refuri" in node:
self.reference_uri = node["refuri"]
elif "refid" in node:
self.reference_uri = f"#{node['refid']}"
else:
self.messages.append('References must have "refuri" or "refid" attribute.')
raise nodes.SkipNode
self.add_text(f"[{ref_text}]({self.reference_uri})")
def depart_reference(self, node: Element) -> None:
self.reference_uri = ""
def visit_title_reference(self, node) -> None:
self.add_text("<cite>")
def depart_title_reference(self, node) -> None:
self.add_text("</cite>")
def visit_image(self, node: Element) -> None:
self.add_text(f"")
def depart_image(self, node: Element) -> None:
pass
def visit_target(self, node: Element) -> None:
pass
def depart_target(self, node: Element) -> None:
pass
def visit_comment(self, node: Element) -> None:
raise nodes.SkipNode
def visit_admonition(self, node: Element) -> None:
self.new_state(0)
def depart_admonition(self, node: Element) -> None:
self.end_state()
def _visit_admonition(self, node: Element) -> None:
self.new_state(2)
def _depart_admonition(self, node: Element) -> None:
label = admonitionlabels[node.tagname]
self.stateindent[-1] += len(label)
self.end_state(first=label + ": ")
visit_attention = _visit_admonition
depart_attention = _depart_admonition
visit_caution = _visit_admonition
depart_caution = _depart_admonition
visit_danger = _visit_admonition
depart_danger = _depart_admonition
visit_error = _visit_admonition
depart_error = _depart_admonition
visit_hint = _visit_admonition
depart_hint = _depart_admonition
visit_important = _visit_admonition
depart_important = _depart_admonition
visit_note = _visit_admonition
depart_note = _depart_admonition
visit_tip = _visit_admonition
depart_tip = _depart_admonition
visit_warning = _visit_admonition
depart_warning = _depart_admonition
visit_seealso = _visit_admonition
depart_seealso = _depart_admonition
###################################################
# Lists
##################################################
def visit_definition(self, node: Element) -> None:
self.new_state()
def depart_definition(self, node: Element) -> None:
self.end_state()
def visit_definition_list(self, node: Element) -> None:
self.list_counter.append(-2)
def depart_definition_list(self, node: Element) -> None:
self.list_counter.pop()
def visit_definition_list_item(self, node: Element) -> None:
self._classifier_count_in_li = len(list(node.findall(nodes.classifier)))
def depart_definition_list_item(self, node: Element) -> None:
pass
def visit_list_item(self, node: Element) -> None:
self.in_list_item += 1
if self.list_counter[-1] == -1:
self.new_state(2)
# bullet list
elif self.list_counter[-1] == -2:
# definition list
pass
else:
# enumerated list
self.list_counter[-1] += 1
self.new_state(len(str(self.list_counter[-1])) + 2)
def depart_list_item(self, node: Element) -> None:
self.in_list_item -= 1
if self.list_counter[-1] == -1:
self.end_state(first="- ", wrap=False)
self.states[-1].pop()
elif self.list_counter[-1] == -2:
pass
else:
self.end_state(first=f"{self.list_counter[-1]}. ", wrap=False, end=None)
def visit_bullet_list(self, node: Element) -> None:
self.list_counter.append(-1)
self.new_state(2)
def depart_bullet_list(self, node: Element) -> None:
self.list_counter.pop()
self.add_text(self.nl)
self.end_state(wrap=False)
def visit_enumerated_list(self, node: Element) -> None:
self.list_counter.append(node.get("start", 1) - 1)
def depart_enumerated_list(self, node: Element) -> None:
self.list_counter.pop()
def visit_term(self, node: Element) -> None:
self.new_state(0)
def depart_term(self, node: Element) -> None:
if not self._classifier_count_in_li:
self.end_state(end=None)
def visit_classifier(self, node: Element) -> None:
self.add_text(" : ")
def depart_classifier(self, node: Element) -> None:
self._classifier_count_in_li -= 1
if not self._classifier_count_in_li:
self.end_state(end=None)
def visit_field_list(self, node: Element) -> None:
self.new_state(0)
def depart_field_list(self, node: Element) -> None:
self.end_state(wrap=False, end=None)
def visit_field(self, node: Element) -> None:
pass
def depart_field(self, node: Element) -> None:
pass
def visit_field_name(self, node: Element) -> None:
pass
def depart_field_name(self, node: Element) -> None:
self.add_text(": ")
def visit_field_body(self, node: Element) -> None:
pass
def depart_field_body(self, node: Element) -> None:
pass
# Inline elements
#################
def visit_emphasis(self, node: Element) -> None:
self.add_text("<em>")
def depart_emphasis(self, node: Element) -> None:
self.add_text("</em>")
def visit_literal_emphasis(self, node: Element) -> None:
return self.visit_emphasis(node)
def depart_literal_emphasis(self, node: Element) -> None:
return self.depart_emphasis(node)
def visit_strong(self, node: Element) -> None:
self.add_text("<strong>")
def depart_strong(self, node: Element) -> None:
self.add_text("</strong>")
def visit_literal_strong(self, node: Element) -> None:
return self.visit_strong(node)
def depart_literal_strong(self, node: Element) -> None:
return self.depart_strong(node)
def visit_literal(self, node: Element) -> None:
self.add_text("`")
def depart_literal(self, node: Element) -> None:
self.add_text("`")
def visit_literal_block(self, node: Element) -> None:
self.in_literal += 1
self.in_literal_block += 1
lang = node.get("language", "python")
self.new_state()
self.add_text(f"```{lang}\n")
def depart_literal_block(self, node: Element) -> None:
self.in_literal -= 1
self.in_literal_block -= 1
self.end_state(wrap=False, end=["```"])
def visit_inline(self, node: Element) -> None:
self.add_text("`")
def depart_inline(self, node: Element) -> None:
self.add_text("`")
def visit_problematic(self, node: Element) -> None:
self.add_text(f"```\n{node.astext()}\n```")
raise nodes.SkipNode
def visit_block_quote(self, node: Element) -> None:
self.add_text("> ")
def depart_block_quote(self, node: Element) -> None:
self.add_text(self.nl)
self.end_state(wrap=False)
def visit_transition(self, node: Element) -> None:
self.new_state(0)
def depart_transition(self, node: Element) -> None:
self.end_state(wrap=False)
def visit_line_block(self, node: Element) -> None:
self.new_state()
self.add_text("<div className='lineblock'>")
def depart_line_block(self, node: Element) -> None:
self.add_text("</div>")
self.end_state()
def visit_container(self, node: Element) -> None:
pass
def depart_container(self, node: Element) -> None:
pass
def visit_raw(self, node: Element) -> None:
if "text" in node.get("format", "").split():
self.new_state(0)
self.add_text(node.astext())
self.end_state(wrap=False)
raise nodes.SkipNode
def visit_line(self, node: Element) -> None:
pass
def depart_line(self, node: Element) -> None:
self.add_text("\n")
def visit_caption(self, node: Element) -> None:
pass
def depart_caption(self, node: Element) -> None:
pass
# Misc. skipped nodes
#####################
def visit_index(self, node: Element) -> None:
raise nodes.SkipNode
def visit_toctree(self, node: Element) -> None:
raise nodes.SkipNode
################################################################################
# tables
################################################################################
# table
# tgroup [cols=x]
# colspec
# thead
# row
# entry
# paragraph (optional)
# tbody
# row
# entry
# paragraph (optional)
###############################################################################
def visit_table(self, node: Element) -> None:
self.new_state(0)
self.table_header = []
self.table_body = []
self.current_row = []
self.in_table_header = False
def depart_table(self, node: Element) -> None:
if self.table_header:
self.add_text("| " + " | ".join(self.table_header) + " |" + self.nl)
separators = []
for i, width in enumerate(self.colwidths):
align = self.colaligns[i]
if align == "left":
separators.append(":" + "-" * (width - 1))
elif align == "right":
separators.append("-" * (width - 1) + ":")
elif align == "center":
separators.append(":" + "-" * (width - 2) + ":")
else:
separators.append("-" * width)
self.add_text("| " + " | ".join(separators) + " |" + self.nl)
for row in self.table_body:
self.add_text("| " + " | ".join(row) + " |" + self.nl)
self.add_text(self.nl)
self.end_state(wrap=False)
def visit_thead(self, node: Element) -> None:
self.in_table_header = True
def depart_thead(self, node: Element) -> None:
self.in_table_header = False
def visit_tbody(self, node: Element) -> None:
pass
def depart_tbody(self, node: Element) -> None:
pass
def visit_tgroup(self, node: Element) -> None:
self.colwidths = []
self.colaligns = []
def depart_tgroup(self, node: Element) -> None:
pass
def visit_colspec(self, node: Element) -> None:
self.colwidths.append(node["colwidth"])
self.colaligns.append(node.get("align", "left"))
def depart_colspec(self, node: Element) -> None:
pass
def visit_row(self, node: Element) -> None:
self.current_row = []
def depart_row(self, node: Element) -> None:
if self.in_table_header:
self.table_header = self.current_row
else:
self.table_body.append(self.current_row)
def visit_entry(self, node: Element) -> None:
self.new_state(0)
def depart_entry(self, node: Element) -> None:
text = self.nl.join(
content.strip() if isinstance(content, str) else content[0].strip()
for _, content in self.states.pop()
if content
)
self.current_row.append(text.replace("\n", ""))
self.stateindent.pop()
########################
# Dagster specific #
########################
# TODO: Move these out of this module and extract out docusaurus style admonitions
def _flag_to_level(self, flag_type: str) -> str:
"""Maps flag type to style that will be using in CSS and admonitions."""
level = "info"
if flag_type == "preview":
level = "info"
if flag_type == "beta":
level = "info"
if flag_type == "superseded":
level = "warning"
if flag_type == "deprecated":
level = "warning"
return level
def visit_flag(self, node: Element) -> None:
flag_type = node.attributes["flag_type"]
message = node.attributes["message"].replace(":::", "")
level = self._flag_to_level(flag_type)
self.new_state()
self.add_text(f":::{level}[{flag_type}]\n")
self.add_text(f"{message}\n")
def depart_flag(self, node: Element) -> None:
self.add_text("\n:::\n")
self.end_state(wrap=False)
def visit_inline_flag(self, node: Element) -> None:
flag_type = node.attributes["flag_type"]
level = self._flag_to_level(flag_type)
self.add_text(f'<span className="flag flag-{level}">')
self.add_text(node.attributes["flag_type"])
def depart_inline_flag(self, node: Element) -> None:
self.add_text("</span>")
def visit_collapse_node(self, node: Element) -> None:
raise nodes.SkipNode
def visit_CollapseNode(self, node: Element) -> None:
raise nodes.SkipNode
| MdxTranslator |
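The alignment-separator logic in depart_table above, isolated as a runnable sketch (the function name is illustrative, not part of the translator):

def separator_row(colwidths, colaligns):
    # Build the markdown alignment row emitted between header and body.
    seps = []
    for width, align in zip(colwidths, colaligns):
        if align == "left":
            seps.append(":" + "-" * (width - 1))
        elif align == "right":
            seps.append("-" * (width - 1) + ":")
        elif align == "center":
            seps.append(":" + "-" * (width - 2) + ":")
        else:
            seps.append("-" * width)
    return "| " + " | ".join(seps) + " |"

assert separator_row([5, 5, 5], ["left", "right", "center"]) == "| :---- | ----: | :---: |"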
python | gevent__gevent | src/greentest/3.11/test_httplib.py | {
"start": 56978,
"end": 60763
} | class ____(TestCase):
"""
Test peek(), read1(), readline()
"""
lines = (
'HTTP/1.1 200 OK\r\n'
'\r\n'
'hello world!\n'
'and now \n'
'for something completely different\n'
'foo'
)
lines_expected = lines[lines.find('hello'):].encode("ascii")
lines_chunked = (
'HTTP/1.1 200 OK\r\n'
'Transfer-Encoding: chunked\r\n\r\n'
'a\r\n'
'hello worl\r\n'
'3\r\n'
'd!\n\r\n'
'9\r\n'
'and now \n\r\n'
'23\r\n'
'for something completely different\n\r\n'
'3\r\n'
'foo\r\n'
'0\r\n' # terminating chunk
'\r\n' # end of trailers
)
def setUp(self):
sock = FakeSocket(self.lines)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
resp.fp = io.BufferedReader(resp.fp)
self.resp = resp
def test_peek(self):
resp = self.resp
# patch up the buffered peek so that it returns not too much stuff
oldpeek = resp.fp.peek
def mypeek(n=-1):
p = oldpeek(n)
if n >= 0:
return p[:n]
return p[:10]
resp.fp.peek = mypeek
all = []
while True:
# try a short peek
p = resp.peek(3)
if p:
self.assertGreater(len(p), 0)
# then unbounded peek
p2 = resp.peek()
self.assertGreaterEqual(len(p2), len(p))
self.assertTrue(p2.startswith(p))
next = resp.read(len(p2))
self.assertEqual(next, p2)
else:
next = resp.read()
self.assertFalse(next)
all.append(next)
if not next:
break
self.assertEqual(b"".join(all), self.lines_expected)
def test_readline(self):
resp = self.resp
self._verify_readline(self.resp.readline, self.lines_expected)
def test_readline_without_limit(self):
self._verify_readline(self.resp.readline, self.lines_expected, limit=-1)
def _verify_readline(self, readline, expected, limit=5):
all = []
while True:
# short readlines
line = readline(limit)
if line and line != b"foo":
if len(line) < 5:
self.assertTrue(line.endswith(b"\n"))
all.append(line)
if not line:
break
self.assertEqual(b"".join(all), expected)
self.assertTrue(self.resp.isclosed())
def test_read1(self):
resp = self.resp
def r():
res = resp.read1(4)
self.assertLessEqual(len(res), 4)
return res
readliner = Readliner(r)
self._verify_readline(readliner.readline, self.lines_expected)
def test_read1_unbounded(self):
resp = self.resp
all = []
while True:
data = resp.read1()
if not data:
break
all.append(data)
self.assertEqual(b"".join(all), self.lines_expected)
self.assertTrue(resp.isclosed())
def test_read1_bounded(self):
resp = self.resp
all = []
while True:
data = resp.read1(10)
if not data:
break
self.assertLessEqual(len(data), 10)
all.append(data)
self.assertEqual(b"".join(all), self.lines_expected)
self.assertTrue(resp.isclosed())
def test_read1_0(self):
self.assertEqual(self.resp.read1(0), b"")
self.assertFalse(self.resp.isclosed())
def test_peek_0(self):
p = self.resp.peek(0)
self.assertLessEqual(0, len(p))
| ExtendedReadTest |
python | allegroai__clearml | clearml/backend_api/session/jsonmodels/fields.py | {
"start": 11817,
"end": 12706
} | class ____(StringField):
"""Time field."""
types = (datetime.time,)
def __init__(self, str_format: str = None, *args: Any, **kwargs: Any) -> None:
"""Init.
:param str str_format: Format to cast time to (if `None` - casting to
ISO 8601 format).
"""
self.str_format = str_format
super(TimeField, self).__init__(*args, **kwargs)
def to_struct(self, value: datetime.time) -> str:
"""Cast `time` object to string."""
if self.str_format:
return value.strftime(self.str_format)
return value.isoformat()
def parse_value(self, value: Any) -> Optional[datetime.time]:
"""Parse string into instance of `time`."""
if value is None:
return value
if isinstance(value, datetime.time):
return value
return parse(value).timetz()
| TimeField |
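A minimal standalone sketch of the round-trip TimeField implements above, assuming python-dateutil is available (the helper names are illustrative, not part of clearml):

import datetime

from dateutil.parser import parse

def time_to_struct(value: datetime.time, str_format: str = None) -> str:
    # Mirror TimeField.to_struct: honor an optional strftime format.
    return value.strftime(str_format) if str_format else value.isoformat()

def parse_time(value) -> datetime.time:
    # Mirror TimeField.parse_value: pass through None and `time`, parse strings.
    if value is None or isinstance(value, datetime.time):
        return value
    return parse(value).timetz()

assert time_to_struct(datetime.time(13, 45)) == "13:45:00"
assert parse_time("13:45:00") == datetime.time(13, 45)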
python | ray-project__ray | release/nightly_tests/placement_group_tests/pg_run.py | {
"start": 463,
"end": 1600
} | class ____(object):
def __init__(self, i):
self.i = i
def train(self):
time.sleep(0.2)
print("train ", self.i)
def main():
ray.init(address="auto")
bundles = [{"CPU": 1, "GPU": 1}]
bundles += [{"CPU": 1} for _ in range(NUM_CPU_BUNDLES)]
pg = placement_group(bundles, strategy="PACK")
ray.get(pg.ready())
workers = [
Worker.options(
scheduling_strategy=PlacementGroupSchedulingStrategy(placement_group=pg)
).remote(i)
for i in range(NUM_CPU_BUNDLES)
]
trainer = Trainer.options(
scheduling_strategy=PlacementGroupSchedulingStrategy(placement_group=pg)
).remote(0)
start = time.time()
while True:
ray.get([workers[i].work.remote() for i in range(NUM_CPU_BUNDLES)])
ray.get(trainer.train.remote())
end = time.time()
if end - start > RUNTIME:
break
if "TEST_OUTPUT_JSON" in os.environ:
with open(os.environ["TEST_OUTPUT_JSON"], "w") as out_file:
results = {}
json.dump(results, out_file)
if __name__ == "__main__":
main()
| Trainer |
python | run-llama__llama_index | llama-index-integrations/embeddings/llama-index-embeddings-gaudi/llama_index/embeddings/gaudi/base.py | {
"start": 922,
"end": 1762
} | class ____(SentenceTransformer):
"""Child class that overrides the tokenize method from SentenceTransformer."""
def __init__(self, model_name_or_path, embedding_input_size=-1, **kwargs) -> None:
super().__init__(model_name_or_path, **kwargs)
self.embedding_input_size = embedding_input_size
def tokenize(self, texts):
"""Override tokenize method from SentenceTransformer."""
return self._first_module().tokenizer(
texts,
max_length=self.max_seq_length
if (
self.embedding_input_size == -1
or self.embedding_input_size > self.max_seq_length
)
else self.embedding_input_size,
padding="max_length",
return_tensors="pt",
truncation=True,
)
| GaudiSentenceTransformer |
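The effective max-length rule from the tokenize override above, isolated as a sketch (the helper name is illustrative):

def effective_max_length(max_seq_length: int, embedding_input_size: int) -> int:
    # -1 means "no override"; an override larger than the model limit is ignored.
    if embedding_input_size == -1 or embedding_input_size > max_seq_length:
        return max_seq_length
    return embedding_input_size

assert effective_max_length(512, -1) == 512
assert effective_max_length(512, 128) == 128
assert effective_max_length(512, 1024) == 512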
python | ray-project__ray | python/ray/serve/tests/unit/test_proxy.py | {
"start": 5260,
"end": 5524
} | class ____:
def __init__(self, messages=None):
self.messages = messages or []
async def __call__(self):
while True:
if self.messages:
return self.messages.pop()
await asyncio.sleep(0.1)
| FakeHttpReceive |
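Driving the fake above in isolation (sketch; assumes the FakeHttpReceive class defined directly above is in scope):

import asyncio

async def demo():
    # Messages are popped from the end of the list as the caller awaits receive().
    receive = FakeHttpReceive(messages=[{"type": "http.disconnect"}])
    message = await receive()
    assert message["type"] == "http.disconnect"

asyncio.run(demo())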
python | langchain-ai__langchain | libs/core/langchain_core/output_parsers/base.py | {
"start": 4085,
"end": 11161
} | class ____(
BaseLLMOutputParser, RunnableSerializable[LanguageModelOutput, T]
):
"""Base class to parse the output of an LLM call.
Output parsers help structure language model responses.
Example:
```python
# Implement a simple boolean output parser
class BooleanOutputParser(BaseOutputParser[bool]):
true_val: str = "YES"
false_val: str = "NO"
def parse(self, text: str) -> bool:
cleaned_text = text.strip().upper()
if cleaned_text not in (
self.true_val.upper(),
self.false_val.upper(),
):
raise OutputParserException(
f"BooleanOutputParser expected output value to either be "
f"{self.true_val} or {self.false_val} (case-insensitive). "
f"Received {cleaned_text}."
)
return cleaned_text == self.true_val.upper()
@property
def _type(self) -> str:
return "boolean_output_parser"
```
"""
@property
@override
def InputType(self) -> Any:
"""Return the input type for the parser."""
return str | AnyMessage
@property
@override
def OutputType(self) -> type[T]:
"""Return the output type for the parser.
This property is inferred from the first type argument of the class.
Raises:
TypeError: If the class doesn't have an inferable `OutputType`.
"""
for base in self.__class__.mro():
if hasattr(base, "__pydantic_generic_metadata__"):
metadata = base.__pydantic_generic_metadata__
if "args" in metadata and len(metadata["args"]) > 0:
return metadata["args"][0]
msg = (
f"Runnable {self.__class__.__name__} doesn't have an inferable OutputType. "
"Override the OutputType property to specify the output type."
)
raise TypeError(msg)
@override
def invoke(
self,
input: str | BaseMessage,
config: RunnableConfig | None = None,
**kwargs: Any,
) -> T:
if isinstance(input, BaseMessage):
return self._call_with_config(
lambda inner_input: self.parse_result(
[ChatGeneration(message=inner_input)]
),
input,
config,
run_type="parser",
)
return self._call_with_config(
lambda inner_input: self.parse_result([Generation(text=inner_input)]),
input,
config,
run_type="parser",
)
@override
async def ainvoke(
self,
input: str | BaseMessage,
config: RunnableConfig | None = None,
**kwargs: Any | None,
) -> T:
if isinstance(input, BaseMessage):
return await self._acall_with_config(
lambda inner_input: self.aparse_result(
[ChatGeneration(message=inner_input)]
),
input,
config,
run_type="parser",
)
return await self._acall_with_config(
lambda inner_input: self.aparse_result([Generation(text=inner_input)]),
input,
config,
run_type="parser",
)
@override
def parse_result(self, result: list[Generation], *, partial: bool = False) -> T:
"""Parse a list of candidate model `Generation` objects into a specific format.
The return value is parsed from only the first `Generation` in the result, which
is assumed to be the highest-likelihood `Generation`.
Args:
result: A list of `Generation` to be parsed. The `Generation` objects are
assumed to be different candidate outputs for a single model input.
partial: Whether to parse the output as a partial result. This is useful
for parsers that can parse partial results.
Returns:
Structured output.
"""
return self.parse(result[0].text)
@abstractmethod
def parse(self, text: str) -> T:
"""Parse a single string model output into some structure.
Args:
text: String output of a language model.
Returns:
Structured output.
"""
async def aparse_result(
self, result: list[Generation], *, partial: bool = False
) -> T:
"""Async parse a list of candidate model `Generation` objects into a specific format.
The return value is parsed from only the first `Generation` in the result, which
is assumed to be the highest-likelihood `Generation`.
Args:
result: A list of `Generation` to be parsed. The `Generation` objects are
assumed to be different candidate outputs for a single model input.
partial: Whether to parse the output as a partial result. This is useful
for parsers that can parse partial results.
Returns:
Structured output.
""" # noqa: E501
return await run_in_executor(None, self.parse_result, result, partial=partial)
async def aparse(self, text: str) -> T:
"""Async parse a single string model output into some structure.
Args:
text: String output of a language model.
Returns:
Structured output.
"""
return await run_in_executor(None, self.parse, text)
# TODO: rename 'completion' -> 'text'.
def parse_with_prompt(
self,
completion: str,
prompt: PromptValue, # noqa: ARG002
) -> Any:
"""Parse the output of an LLM call with the input prompt for context.
The prompt is largely provided in the event the `OutputParser` wants
to retry or fix the output in some way, and needs information from
the prompt to do so.
Args:
completion: String output of a language model.
prompt: Input `PromptValue`.
Returns:
Structured output.
"""
return self.parse(completion)
def get_format_instructions(self) -> str:
"""Instructions on how the LLM output should be formatted."""
raise NotImplementedError
@property
def _type(self) -> str:
"""Return the output parser type for serialization."""
msg = (
f"_type property is not implemented in class {self.__class__.__name__}."
" This is required for serialization."
)
raise NotImplementedError(msg)
def dict(self, **kwargs: Any) -> dict:
"""Return dictionary representation of output parser."""
output_parser_dict = super().model_dump(**kwargs)
with contextlib.suppress(NotImplementedError):
output_parser_dict["_type"] = self._type
return output_parser_dict
| BaseOutputParser |
python | PrefectHQ__prefect | src/prefect/server/task_queue.py | {
"start": 2922,
"end": 3527
} | class ____:
"""A queue that can pull tasks from from any of a number of task queues"""
_queues: List[TaskQueue]
def __init__(self, task_keys: List[str]):
self._queues = [TaskQueue.for_key(task_key) for task_key in task_keys]
async def get(self) -> schemas.core.TaskRun:
"""Gets the next task_run from any of the given queues"""
while True:
for queue in self._queues:
try:
return queue.get_nowait()
except asyncio.QueueEmpty:
continue
await asyncio.sleep(0.01)
| MultiQueue |
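The same non-blocking round-robin poll over plain asyncio.Queue objects (sketch; TaskQueue.for_key is Prefect-specific and omitted here):

import asyncio

async def get_from_any(queues):
    # Poll every queue without blocking; back off briefly when all are empty.
    while True:
        for queue in queues:
            try:
                return queue.get_nowait()
            except asyncio.QueueEmpty:
                continue
        await asyncio.sleep(0.01)

async def main():
    q1, q2 = asyncio.Queue(), asyncio.Queue()
    await q2.put("task-run")
    assert await get_from_any([q1, q2]) == "task-run"

asyncio.run(main())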
python | MongoEngine__mongoengine | mongoengine/fields.py | {
"start": 17056,
"end": 17414
} | class ____(BaseField):
"""Boolean field type."""
def to_python(self, value):
try:
value = bool(value)
except (ValueError, TypeError):
pass
return value
def validate(self, value):
if not isinstance(value, bool):
self.error("BooleanField only accepts boolean values")
| BooleanField |
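Observed behavior of the field above, assuming mongoengine is installed: to_python coerces where it can, while validate stays strict:

from mongoengine.fields import BooleanField

f = BooleanField()
assert f.to_python(1) is True    # bool(1)
assert f.to_python("") is False  # bool("")
f.validate(True)                 # passes; f.validate(1) would raise ValidationError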
python | Textualize__rich | tests/test_pretty.py | {
"start": 6029,
"end": 20281
} | class ____(NamedTuple):
name: str
description: str
price: float
category: str
reviews: List[str]
def test_pretty_namedtuple() -> None:
console = Console(color_system=None)
console.begin_capture()
example_namedtuple = StockKeepingUnit(
"Sparkling British Spring Water",
"Carbonated spring water",
0.9,
"water",
["its amazing!", "its terrible!"],
)
result = pretty_repr(example_namedtuple)
print(result)
assert (
result
== """StockKeepingUnit(
name='Sparkling British Spring Water',
description='Carbonated spring water',
price=0.9,
category='water',
reviews=['its amazing!', 'its terrible!']
)"""
)
def test_pretty_namedtuple_length_one_no_trailing_comma() -> None:
instance = collections.namedtuple("Thing", ["name"])(name="Bob")
assert pretty_repr(instance) == "Thing(name='Bob')"
def test_pretty_namedtuple_empty() -> None:
instance = collections.namedtuple("Thing", [])()
assert pretty_repr(instance) == "Thing()"
def test_pretty_namedtuple_custom_repr() -> None:
class Thing(NamedTuple):
def __repr__(self):
return "XX"
assert pretty_repr(Thing()) == "XX"
def test_pretty_namedtuple_fields_invalid_type() -> None:
class LooksLikeANamedTupleButIsnt(tuple):
_fields = "blah"
instance = LooksLikeANamedTupleButIsnt()
result = pretty_repr(instance)
assert result == "()" # Treated as tuple
def test_pretty_namedtuple_max_depth() -> None:
instance = {"unit": StockKeepingUnit("a", "b", 1.0, "c", ["d", "e"])}
result = pretty_repr(instance, max_depth=1)
assert result == "{'unit': StockKeepingUnit(...)}"
def test_small_width() -> None:
test = ["Hello world! 12345"]
result = pretty_repr(test, max_width=10)
expected = "[\n 'Hello world! 12345'\n]"
assert result == expected
def test_ansi_in_pretty_repr() -> None:
class Hello:
def __repr__(self):
return "Hello \x1b[38;5;239mWorld!"
pretty = Pretty(Hello())
console = Console(file=io.StringIO(), record=True)
console.print(pretty)
result = console.export_text()
assert result == "Hello World!\n"
def test_broken_repr() -> None:
class BrokenRepr:
def __repr__(self):
1 / 0
test = [BrokenRepr()]
result = pretty_repr(test)
expected = "[<repr-error 'division by zero'>]"
assert result == expected
def test_broken_getattr() -> None:
class BrokenAttr:
def __getattr__(self, name):
1 / 0
def __repr__(self):
return "BrokenAttr()"
test = BrokenAttr()
result = pretty_repr(test)
assert result == "BrokenAttr()"
def test_reference_cycle_container() -> None:
test = []
test.append(test)
res = pretty_repr(test)
assert res == "[...]"
test = [1, []]
test[1].append(test)
res = pretty_repr(test)
assert res == "[1, [...]]"
# Not a cyclic reference, just a repeated reference
a = [2]
test = [1, [a, a]]
res = pretty_repr(test)
assert res == "[1, [[2], [2]]]"
def test_reference_cycle_namedtuple() -> None:
class Example(NamedTuple):
x: int
y: Any
test = Example(1, [Example(2, [])])
test.y[0].y.append(test)
res = pretty_repr(test)
assert res == "Example(x=1, y=[Example(x=2, y=[...])])"
# Not a cyclic reference, just a repeated reference
a = Example(2, None)
test = Example(1, [a, a])
res = pretty_repr(test)
assert res == "Example(x=1, y=[Example(x=2, y=None), Example(x=2, y=None)])"
def test_reference_cycle_dataclass() -> None:
@dataclass
class Example:
x: int
y: Any
test = Example(1, None)
test.y = test
res = pretty_repr(test)
assert res == "Example(x=1, y=...)"
test = Example(1, Example(2, None))
test.y.y = test
res = pretty_repr(test)
assert res == "Example(x=1, y=Example(x=2, y=...))"
# Not a cyclic reference, just a repeated reference
a = Example(2, None)
test = Example(1, [a, a])
res = pretty_repr(test)
assert res == "Example(x=1, y=[Example(x=2, y=None), Example(x=2, y=None)])"
def test_reference_cycle_attrs() -> None:
@attr.define
class Example:
x: int
y: Any
test = Example(1, None)
test.y = test
res = pretty_repr(test)
assert res == "Example(x=1, y=...)"
test = Example(1, Example(2, None))
test.y.y = test
res = pretty_repr(test)
assert res == "Example(x=1, y=Example(x=2, y=...))"
# Not a cyclic reference, just a repeated reference
a = Example(2, None)
test = Example(1, [a, a])
res = pretty_repr(test)
assert res == "Example(x=1, y=[Example(x=2, y=None), Example(x=2, y=None)])"
def test_reference_cycle_custom_repr() -> None:
class Example:
def __init__(self, x, y):
self.x = x
self.y = y
def __rich_repr__(self):
yield ("x", self.x)
yield ("y", self.y)
test = Example(1, None)
test.y = test
res = pretty_repr(test)
assert res == "Example(x=1, y=...)"
test = Example(1, Example(2, None))
test.y.y = test
res = pretty_repr(test)
assert res == "Example(x=1, y=Example(x=2, y=...))"
# Not a cyclic reference, just a repeated reference
a = Example(2, None)
test = Example(1, [a, a])
res = pretty_repr(test)
assert res == "Example(x=1, y=[Example(x=2, y=None), Example(x=2, y=None)])"
def test_max_depth() -> None:
d = {}
d["foo"] = {"fob": {"a": [1, 2, 3], "b": {"z": "x", "y": ["a", "b", "c"]}}}
assert pretty_repr(d, max_depth=0) == "{...}"
assert pretty_repr(d, max_depth=1) == "{'foo': {...}}"
assert pretty_repr(d, max_depth=2) == "{'foo': {'fob': {...}}}"
assert pretty_repr(d, max_depth=3) == "{'foo': {'fob': {'a': [...], 'b': {...}}}}"
assert (
pretty_repr(d, max_width=100, max_depth=4)
== "{'foo': {'fob': {'a': [1, 2, 3], 'b': {'z': 'x', 'y': [...]}}}}"
)
assert (
pretty_repr(d, max_width=100, max_depth=5)
== "{'foo': {'fob': {'a': [1, 2, 3], 'b': {'z': 'x', 'y': ['a', 'b', 'c']}}}}"
)
assert (
pretty_repr(d, max_width=100, max_depth=None)
== "{'foo': {'fob': {'a': [1, 2, 3], 'b': {'z': 'x', 'y': ['a', 'b', 'c']}}}}"
)
def test_max_depth_rich_repr() -> None:
class Foo:
def __init__(self, foo):
self.foo = foo
def __rich_repr__(self):
yield "foo", self.foo
class Bar:
def __init__(self, bar):
self.bar = bar
def __rich_repr__(self):
yield "bar", self.bar
assert (
pretty_repr(Foo(foo=Bar(bar=Foo(foo=[]))), max_depth=2)
== "Foo(foo=Bar(bar=Foo(...)))"
)
def test_max_depth_attrs() -> None:
@attr.define
class Foo:
foo = attr.field()
@attr.define
class Bar:
bar = attr.field()
assert (
pretty_repr(Foo(foo=Bar(bar=Foo(foo=[]))), max_depth=2)
== "Foo(foo=Bar(bar=Foo(...)))"
)
def test_max_depth_dataclass() -> None:
@dataclass
class Foo:
foo: object
@dataclass
class Bar:
bar: object
assert (
pretty_repr(Foo(foo=Bar(bar=Foo(foo=[]))), max_depth=2)
== "Foo(foo=Bar(bar=Foo(...)))"
)
def test_defaultdict() -> None:
test_dict = defaultdict(int, {"foo": 2})
result = pretty_repr(test_dict)
assert result == "defaultdict(<class 'int'>, {'foo': 2})"
def test_deque() -> None:
test_deque = deque([1, 2, 3])
result = pretty_repr(test_deque)
assert result == "deque([1, 2, 3])"
test_deque = deque([1, 2, 3], maxlen=None)
result = pretty_repr(test_deque)
assert result == "deque([1, 2, 3])"
test_deque = deque([1, 2, 3], maxlen=5)
result = pretty_repr(test_deque)
assert result == "deque([1, 2, 3], maxlen=5)"
test_deque = deque([1, 2, 3], maxlen=0)
result = pretty_repr(test_deque)
assert result == "deque(maxlen=0)"
test_deque = deque([])
result = pretty_repr(test_deque)
assert result == "deque()"
test_deque = deque([], maxlen=None)
result = pretty_repr(test_deque)
assert result == "deque()"
test_deque = deque([], maxlen=5)
result = pretty_repr(test_deque)
assert result == "deque(maxlen=5)"
test_deque = deque([], maxlen=0)
result = pretty_repr(test_deque)
assert result == "deque(maxlen=0)"
def test_array() -> None:
test_array = array("I", [1, 2, 3])
result = pretty_repr(test_array)
assert result == "array('I', [1, 2, 3])"
def test_tuple_of_one() -> None:
assert pretty_repr((1,)) == "(1,)"
def test_node() -> None:
node = Node("abc")
assert pretty_repr(node) == "abc: "
def test_indent_lines() -> None:
console = Console(width=100, color_system=None)
console.begin_capture()
console.print(Pretty([100, 200], indent_guides=True), width=8)
expected = """\
[
│ 100,
│ 200
]
"""
result = console.end_capture()
print(repr(result))
print(result)
assert result == expected
def test_pprint() -> None:
console = Console(color_system=None)
console.begin_capture()
pprint(1, console=console)
assert console.end_capture() == "1\n"
def test_pprint_max_values() -> None:
console = Console(color_system=None)
console.begin_capture()
pprint([1, 2, 3, 4, 5, 6, 7, 8, 9, 0], console=console, max_length=2)
assert console.end_capture() == "[1, 2, ... +8]\n"
def test_pprint_max_items() -> None:
console = Console(color_system=None)
console.begin_capture()
pprint({"foo": 1, "bar": 2, "egg": 3}, console=console, max_length=2)
assert console.end_capture() == """{'foo': 1, 'bar': 2, ... +1}\n"""
def test_pprint_max_string() -> None:
console = Console(color_system=None)
console.begin_capture()
pprint(["Hello" * 20], console=console, max_string=8)
assert console.end_capture() == """['HelloHel'+92]\n"""
def test_tuples() -> None:
console = Console(color_system=None)
console.begin_capture()
pprint((1,), console=console)
pprint((1,), expand_all=True, console=console)
pprint(((1,),), expand_all=True, console=console)
result = console.end_capture()
print(repr(result))
expected = "(1,)\n(\n│ 1,\n)\n(\n│ (\n│ │ 1,\n│ ),\n)\n"
print(result)
print("--")
print(expected)
assert result == expected
def test_newline() -> None:
console = Console(color_system=None)
console.begin_capture()
console.print(Pretty((1,), insert_line=True, expand_all=True))
result = console.end_capture()
expected = "\n(\n 1,\n)\n"
assert result == expected
def test_empty_repr() -> None:
class Foo:
def __repr__(self):
return ""
assert pretty_repr(Foo()) == ""
def test_attrs() -> None:
@attr.define
class Point:
x: int
y: int
foo: str = attr.field(repr=str.upper)
z: int = 0
result = pretty_repr(Point(1, 2, foo="bar"))
print(repr(result))
expected = "Point(x=1, y=2, foo=BAR, z=0)"
assert result == expected
def test_attrs_empty() -> None:
@attr.define
class Nada:
pass
result = pretty_repr(Nada())
print(repr(result))
expected = "Nada()"
assert result == expected
@skip_py310
@skip_py311
@skip_py312
@skip_py313
@skip_py314
def test_attrs_broken() -> None:
@attr.define
class Foo:
bar: int
foo = Foo(1)
del foo.bar
result = pretty_repr(foo)
print(repr(result))
expected = "Foo(bar=AttributeError('bar'))"
assert result == expected
@skip_py38
@skip_py39
def test_attrs_broken_310() -> None:
@attr.define
class Foo:
bar: int
foo = Foo(1)
del foo.bar
result = pretty_repr(foo)
print(repr(result))
if sys.version_info >= (3, 13):
expected = "Foo(\n bar=AttributeError(\"'tests.test_pretty.test_attrs_broken_310.<locals>.Foo' object has no attribute 'bar'\")\n)"
else:
expected = "Foo(bar=AttributeError(\"'Foo' object has no attribute 'bar'\"))"
assert result == expected
def test_user_dict() -> None:
class D1(UserDict):
pass
class D2(UserDict):
def __repr__(self):
return "FOO"
d1 = D1({"foo": "bar"})
d2 = D2({"foo": "bar"})
result = pretty_repr(d1, expand_all=True)
print(repr(result))
assert result == "{\n 'foo': 'bar'\n}"
result = pretty_repr(d2, expand_all=True)
print(repr(result))
assert result == "FOO"
def test_lying_attribute() -> None:
"""Test getattr doesn't break rich repr protocol"""
class Foo:
def __getattr__(self, attr):
return "foo"
foo = Foo()
result = pretty_repr(foo)
assert "Foo" in result
def test_measure_pretty() -> None:
"""Test measure respects expand_all"""
# https://github.com/Textualize/rich/issues/1998
console = Console()
pretty = Pretty(["alpha", "beta", "delta", "gamma"], expand_all=True)
measurement = console.measure(pretty)
assert measurement == Measurement(12, 12)
def test_tuple_rich_repr() -> None:
"""
Test that can use None as key to have tuple positional values.
"""
class Foo:
def __rich_repr__(self):
yield None, (1,)
assert pretty_repr(Foo()) == "Foo((1,))"
def test_tuple_rich_repr_default() -> None:
"""
Test that can use None as key to have tuple positional values and with a default.
"""
class Foo:
def __rich_repr__(self):
yield None, (1,), (1,)
assert pretty_repr(Foo()) == "Foo()"
def test_dataclass_no_attribute() -> None:
"""Regression test for https://github.com/Textualize/rich/issues/3417"""
from dataclasses import dataclass, field
@dataclass(eq=False)
class BadDataclass:
item: int = field(init=False)
# item is not provided
bad_data_class = BadDataclass()
console = Console()
with console.capture() as capture:
console.print(bad_data_class)
expected = "BadDataclass()\n"
result = capture.get()
assert result == expected
| StockKeepingUnit |
python | astropy__astropy | astropy/units/function/logarithmic.py | {
"start": 4805,
"end": 5788
} | class ____(LogUnit):
"""Logarithmic physical units expressed in magnitudes.
Parameters
----------
physical_unit : `~astropy.units.Unit` or `string`
Unit that is encapsulated within the magnitude function unit.
If not given, dimensionless.
function_unit : `~astropy.units.Unit` or `string`
By default, this is ``dex``, but this allows one to use an equivalent
unit such as ``0.5 dex``.
"""
@cached_property
def _default_function_unit(self):
from .units import dex
return dex
@property
def _quantity_class(self):
return Dex
def to_string(self, format="generic"):
if format == "cds":
if self.physical_unit == dimensionless_unscaled:
return "[-]" # by default, would get "[---]".
else:
return f"[{self.physical_unit.to_string(format=format)}]"
else:
return super().to_string(format=format)
| DexUnit |
python | python-poetry__poetry | tests/repositories/fixtures/pypi.org/generate.py | {
"start": 3613,
"end": 3930
} | class ____:
path: Path
md5: str = dataclasses.field(init=False)
sha256: str = dataclasses.field(init=False)
def __post_init__(self) -> None:
data = self.path.read_bytes()
self.sha256 = hashlib.sha256(data).hexdigest()
self.md5 = hashlib.md5(data).hexdigest()
| ReleaseFileMetadata |
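The same dataclass pattern in miniature: init=False fields computed once in __post_init__ (the class and values here are illustrative):

import dataclasses
import hashlib

@dataclasses.dataclass
class BytesMetadata:
    data: bytes
    md5: str = dataclasses.field(init=False)
    sha256: str = dataclasses.field(init=False)

    def __post_init__(self) -> None:
        self.sha256 = hashlib.sha256(self.data).hexdigest()
        self.md5 = hashlib.md5(self.data).hexdigest()

meta = BytesMetadata(b"hello")
assert meta.sha256.startswith("2cf24dba")  # sha256 of b"hello"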
python | cython__cython | Cython/Compiler/Nodes.py | {
"start": 5739,
"end": 12223
} | class ____:
# pos (string, int, int) Source file position
# is_name boolean Is a NameNode
# is_literal boolean Is a ConstNode
is_name = 0
is_none = 0
is_nonecheck = 0
is_literal = 0
is_terminator = 0
is_wrapper = False # is a DefNode wrapper for a C function
is_cproperty = False
is_templated_type_node = False
temps = None
# All descendants should set child_attrs to a list of the attributes
# containing nodes considered "children" in the tree. Each such attribute
# can either contain a single node or a list of nodes. See Visitor.py.
child_attrs = None
# Subset of attributes that are evaluated in the outer scope (e.g. function default arguments).
outer_attrs = None
cf_state = None
# This may be an additional (or 'actual') type that will be checked when
# this node is coerced to another type. This could be useful to set when
# the actual type to which it can coerce is known, but you want to leave
# the type a py_object_type
coercion_type = None
def __init__(self, pos, **kw):
self.pos = pos
self.__dict__.update(kw)
gil_message = "Operation"
nogil_check = None
in_nogil_context = False # For use only during code generation.
def gil_error(self, env=None):
error(self.pos, "%s not allowed without gil" % self.gil_message)
cpp_message = "Operation"
def cpp_check(self, env):
if not env.is_cpp():
self.cpp_error()
def cpp_error(self):
error(self.pos, "%s only allowed in c++" % self.cpp_message)
def clone_node(self):
"""Clone the node. This is defined as a shallow copy, except for member lists
amongst the child attributes (from get_child_accessors) which are also
copied. Lists containing child nodes are thus seen as a way for the node
to hold multiple children directly; the list is not treated as a separate
level in the tree."""
result = copy.copy(self)
for attrname in result.child_attrs:
value = getattr(result, attrname)
if isinstance(value, list):
setattr(result, attrname, [x for x in value])
return result
#
# There are 3 main phases of parse tree processing, applied in order to
# all the statements in a given scope-block:
#
# (0) analyse_declarations
# Make symbol table entries for all declarations at the current
# level, both explicit (def, cdef, etc.) and implicit (assignment
# to an otherwise undeclared name).
#
# (1) analyse_expressions
# Determine the result types of expressions and fill in the
# 'type' attribute of each ExprNode. Insert coercion nodes into the
# tree where needed to convert to and from Python objects.
# Replace tree nodes with more appropriate implementations found by
# the type analysis.
#
# (2) generate_code
# Emit C code for all declarations, statements and expressions.
#
# These phases are triggered by tree transformations.
# See the full pipeline in Pipeline.py.
#
def analyse_declarations(self, env):
pass
def analyse_expressions(self, env):
raise InternalError("analyse_expressions not implemented for %s" %
self.__class__.__name__)
def generate_code(self, code):
raise InternalError("generate_code not implemented for %s" %
self.__class__.__name__)
def annotate(self, code):
# mro does the wrong thing
if isinstance(self, BlockNode):
self.body.annotate(code)
def end_pos(self):
try:
return self._end_pos
except AttributeError:
pos = self.pos
if not self.child_attrs:
self._end_pos = pos
return pos
for attr in self.child_attrs:
child = getattr(self, attr)
# Sometimes lists, sometimes nodes
if child is None:
pass
elif isinstance(child, list):
for c in child:
pos = max(pos, c.end_pos())
else:
pos = max(pos, child.end_pos())
self._end_pos = pos
return pos
def dump(self, level=0, filter_out=("pos",), cutoff=100, encountered=None):
"""Debug helper method that returns a recursive string representation of this node.
"""
if cutoff == 0:
return "<...nesting level cutoff...>"
if encountered is None:
encountered = set()
if id(self) in encountered:
return "<%s (0x%x) -- already output>" % (self.__class__.__name__, id(self))
encountered.add(id(self))
def dump_child(x, level):
if isinstance(x, Node):
return x.dump(level, filter_out, cutoff-1, encountered)
elif isinstance(x, list):
return "[%s]" % ", ".join([dump_child(item, level) for item in x])
else:
return repr(x)
attrs = [(key, value) for key, value in self.__dict__.items() if key not in filter_out]
if len(attrs) == 0:
return "<%s (0x%x)>" % (self.__class__.__name__, id(self))
else:
indent = " " * level
res = "<%s (0x%x)\n" % (self.__class__.__name__, id(self))
for key, value in attrs:
res += "%s %s: %s\n" % (indent, key, dump_child(value, level + 1))
res += "%s>" % indent
return res
def dump_pos(self, mark_column=False, marker='(#)'):
"""Debug helper method that returns the source code context of this node as a string.
"""
if not self.pos:
return ''
source_desc, line, col = self.pos
contents = source_desc.get_lines(encoding='ASCII', error_handling='ignore')
# line numbers start at 1
lines = contents[max(0, line-3):line]
current = lines[-1]
if mark_column:
current = current[:col] + marker + current[col:]
lines[-1] = current.rstrip() + ' # <<<<<<<<<<<<<<'
lines += contents[line:line+2]
code = '\n'.join(lines)
return f'"{source_desc.get_escaped_description()}":{line:d}:{col:d}\n{code}\n'
| Node |
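The clone_node contract above in isolation: a shallow copy, except child lists are re-created so the clone's list membership can diverge (sketch with an illustrative class):

import copy

class Tiny:
    child_attrs = ["kids"]

    def __init__(self, kids):
        self.kids = kids

    def clone(self):
        result = copy.copy(self)
        for attrname in result.child_attrs:
            value = getattr(result, attrname)
            if isinstance(value, list):
                setattr(result, attrname, list(value))
        return result

a = Tiny(kids=[1, 2])
b = a.clone()
b.kids.append(3)
assert a.kids == [1, 2] and b.kids == [1, 2, 3]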
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/matchValue1.py | {
"start": 2211,
"end": 2275
} | class ____(Enum):
gold = 1
silver = 2
bronze = 3
| Medal |
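A self-contained sketch (Python 3.10+) of the value patterns this pyright sample exists to type-check:

from enum import Enum

class Medal(Enum):
    gold = 1
    silver = 2
    bronze = 3

def describe(medal: Medal) -> str:
    match medal:
        case Medal.gold:
            return "1st place"
        case Medal.silver | Medal.bronze:
            return "podium"

assert describe(Medal.silver) == "podium"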
python | run-llama__llama_index | llama-index-integrations/voice_agents/llama-index-voice-agents-elevenlabs/llama_index/voice_agents/elevenlabs/interface.py | {
"start": 165,
"end": 636
} | class ____(DefaultAudioInterface, BaseVoiceAgentInterface):
def __init__(self, *args, **kwargs):
super().__init__()
# Some methods from BaseVoiceAgentInterface are not implemented in DefaultAudioInterface, so we implement toy methods here
def _speaker_callback(self, *args, **kwargs):
pass
def _microphone_callback(self, *args, **kwargs):
pass
def receive(self, data, *args, **kwargs):
pass
| ElevenLabsVoiceAgentInterface |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/genericType29.py | {
"start": 279,
"end": 386
} | class ____(Generic[T1]): ...
def func1(x: A[T2]) -> A[T2 | None]: ...
x1: A[int | None] = func1(A[int]())
| A |
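A runnable rendering of the stub above (sketch): the return type widens the type argument with None, which is why A[int | None] accepts the result:

from typing import Generic, Optional, TypeVar

T = TypeVar("T")

class A(Generic[T]):
    pass

def func1(x: A[T]) -> A[Optional[T]]:
    return A()

x1: A[Optional[int]] = func1(A[int]())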
python | getsentry__sentry | tests/sentry/api/endpoints/test_organization_root_cause_analysis.py | {
"start": 399,
"end": 13741
} | class ____(MetricsAPIBaseTestCase):
def setUp(self) -> None:
super().setUp()
self.login_as(self.user)
self.org = self.create_organization(owner=self.user)
self.project = self.create_project(organization=self.org)
self.url = reverse(
"sentry-api-0-organization-events-root-cause-analysis", args=[self.org.slug]
)
self.store_performance_metric(
name=TransactionMRI.DURATION.value,
tags={"transaction": "foo"},
org_id=self.org.id,
project_id=self.project.id,
value=1,
)
self.trace_id = "a" * 32
@property
def now(self):
return MetricsAPIBaseTestCase.MOCK_DATETIME.replace(tzinfo=None)
def create_transaction(
self,
transaction,
trace_id,
span_id,
parent_span_id,
spans,
project_id,
start_timestamp,
duration,
):
timestamp = start_timestamp + timedelta(milliseconds=duration)
data = load_data(
"transaction",
trace=trace_id,
span_id=span_id,
spans=spans,
start_timestamp=start_timestamp,
timestamp=timestamp,
)
data["transaction"] = transaction
data["contexts"]["trace"]["parent_span_id"] = parent_span_id
return self.store_event(data, project_id=project_id)
def test_transaction_name_required(self) -> None:
response = self.client.get(
self.url,
format="json",
data={
"project": self.project.id,
"breakpoint": (self.now - timedelta(days=1)).isoformat(),
},
)
assert response.status_code == 400, response.content
def test_project_id_required(self) -> None:
response = self.client.get(
self.url,
format="json",
data={
"transaction": "foo",
},
)
assert response.status_code == 400, response.content
def test_breakpoint_required(self) -> None:
response = self.client.get(
self.url,
format="json",
data={"transaction": "foo", "project": self.project.id},
)
assert response.status_code == 400, response.content
def test_transaction_must_exist(self) -> None:
response = self.client.get(
self.url,
format="json",
data={
"transaction": "foo",
"project": self.project.id,
"breakpoint": self.now - timedelta(days=1),
"start": self.now - timedelta(days=3),
"end": self.now,
},
)
assert response.status_code == 200, response.content
response = self.client.get(
self.url,
format="json",
data={
"transaction": "does not exist",
"project": self.project.id,
"breakpoint": self.now - timedelta(days=1),
"start": self.now - timedelta(days=3),
"end": self.now,
},
)
assert response.status_code == 400, response.content
# TODO: Enable this test when adding a serializer to handle validation
# def test_breakpoint_must_be_in_the_past(self) -> None:
# response = self.client.get(
# self.url,
# format="json",
# data={
# "transaction": "foo",
# "project": self.project.id,
# "breakpoint": (self.now + timedelta(days=1)).isoformat(),
# },
# )
# assert response.status_code == 400, response.content
def test_returns_change_data_for_regressed_spans(self) -> None:
before_timestamp = self.now - timedelta(days=2)
before_span = {
"parent_span_id": "a" * 16,
"span_id": "e" * 16,
"start_timestamp": before_timestamp.isoformat(),
"timestamp": before_timestamp.isoformat(),
"op": "django.middleware",
"description": "middleware span",
"exclusive_time": 60.0,
}
# before
self.create_transaction(
transaction="foo",
trace_id=self.trace_id,
span_id="a" * 16,
parent_span_id="b" * 16,
spans=[before_span],
project_id=self.project.id,
start_timestamp=before_timestamp,
duration=60,
)
self.create_transaction(
transaction="foo",
trace_id=self.trace_id,
span_id="b" * 16,
parent_span_id="b" * 16,
spans=[{**before_span, "op": "db", "description": "db span"}],
project_id=self.project.id,
start_timestamp=before_timestamp,
duration=60,
)
# after
after_timestamp = self.now - timedelta(hours=1)
self.create_transaction(
transaction="foo",
trace_id=self.trace_id,
span_id="c" * 16,
parent_span_id="d" * 16,
spans=[
{
"parent_span_id": "e" * 16,
"span_id": "f" * 16,
"start_timestamp": after_timestamp.isoformat(),
"timestamp": after_timestamp.isoformat(),
"op": "django.middleware",
"description": "middleware span",
"exclusive_time": 40.0,
},
{
"parent_span_id": "1" * 16,
"span_id": "2" * 16,
"start_timestamp": after_timestamp.isoformat(),
"timestamp": after_timestamp.isoformat(),
"op": "django.middleware",
"description": "middleware span",
"exclusive_time": 600.0,
},
{
"parent_span_id": "1" * 16,
"span_id": "3" * 16,
"start_timestamp": after_timestamp.isoformat(),
"timestamp": after_timestamp.isoformat(),
"op": "django.middleware",
"description": "middleware span",
"exclusive_time": 60.0,
},
# This db span shouldn't appear in the results
# since there are no changes
{**before_span, "span_id": "5" * 16, "op": "db", "description": "db span"},
],
project_id=self.project.id,
start_timestamp=after_timestamp,
duration=600,
)
response = self.client.get(
self.url,
format="json",
data={
"transaction": "foo",
"project": self.project.id,
"breakpoint": self.now - timedelta(days=1),
"start": self.now - timedelta(days=3),
"end": self.now,
},
)
assert response.status_code == 200, response.content
assert response.data == [
{
"span_op": "django.middleware",
"span_group": "2b9cbb96dbf59baa",
"span_description": "middleware span",
"score": 1.1166666666666667,
"spm_before": 0.00034722222222222224,
"spm_after": 0.0020833333333333333,
"p95_before": 60.0,
"p95_after": 546.0,
},
{
"p95_after": 60.0,
"p95_before": 60.0,
"score": 0.020833333333333336,
"span_description": "db span",
"span_group": "5ad8c5a1e8d0e5f7",
"span_op": "db",
"spm_after": 0.0006944444444444445,
"spm_before": 0.00034722222222222224,
},
]
def test_results_are_limited(self) -> None:
# Before
self.create_transaction(
transaction="foo",
trace_id=self.trace_id,
span_id="a" * 16,
parent_span_id="b" * 16,
spans=[
{
"parent_span_id": "a" * 16,
"span_id": "e" * 16,
"start_timestamp": (self.now - timedelta(days=2)).isoformat(),
"timestamp": (self.now - timedelta(days=2)).isoformat(),
"op": "django.middleware",
"description": "middleware span",
"exclusive_time": 60.0,
}
],
project_id=self.project.id,
start_timestamp=self.now - timedelta(days=2),
duration=60,
)
# After
self.create_transaction(
transaction="foo",
trace_id=self.trace_id,
span_id="a" * 16,
parent_span_id="b" * 16,
spans=[
{
"parent_span_id": "a" * 16,
"span_id": "e" * 16,
"start_timestamp": (self.now - timedelta(hours=1)).isoformat(),
"timestamp": (self.now - timedelta(hours=1)).isoformat(),
"op": "django.middleware",
"description": "middleware span",
"exclusive_time": 100.0,
},
{
"parent_span_id": "a" * 16,
"span_id": "f" * 16,
"start_timestamp": (self.now - timedelta(hours=1)).isoformat(),
"timestamp": (self.now - timedelta(hours=1)).isoformat(),
"op": "db",
"description": "db",
"exclusive_time": 10000.0,
},
],
project_id=self.project.id,
start_timestamp=self.now - timedelta(hours=1),
duration=10100,
)
response = self.client.get(
self.url,
format="json",
data={
"transaction": "foo",
"project": self.project.id,
"breakpoint": self.now - timedelta(days=1),
"start": self.now - timedelta(days=3),
"end": self.now,
"per_page": 1,
},
)
assert response.status_code == 200, response.content
assert len(response.data) == 1
assert response.data == [
{
"span_op": "db",
"span_group": "d77d5e503ad1439f",
"score": 6.944444444444445,
"spm_before": 0.0,
"spm_after": 0.0006944444444444445,
"p95_before": 0.0,
"p95_after": 10000.0,
"span_description": "db",
}
]
def test_analysis_leaves_a_buffer_around_breakpoint_to_ignore_mixed_transactions(self) -> None:
breakpoint_timestamp = self.now - timedelta(days=1)
before_timestamp = breakpoint_timestamp - timedelta(hours=1)
after_timestamp = breakpoint_timestamp + timedelta(hours=1)
# Before
self.create_transaction(
transaction="foo",
trace_id=self.trace_id,
span_id="a" * 16,
parent_span_id="b" * 16,
spans=[
{
"parent_span_id": "a" * 16,
"span_id": "e" * 16,
"start_timestamp": before_timestamp.isoformat(),
"timestamp": before_timestamp.isoformat(),
"op": "django.middleware",
"description": "middleware span",
"exclusive_time": 60.0,
}
],
project_id=self.project.id,
start_timestamp=before_timestamp,
duration=60,
)
# After
self.create_transaction(
transaction="foo",
trace_id=self.trace_id,
span_id="a" * 16,
parent_span_id="b" * 16,
spans=[
{
"parent_span_id": "a" * 16,
"span_id": "e" * 16,
"start_timestamp": after_timestamp.isoformat(),
"timestamp": after_timestamp.isoformat(),
"op": "django.middleware",
"description": "middleware span",
"exclusive_time": 100.0,
},
],
project_id=self.project.id,
start_timestamp=after_timestamp,
duration=200,
)
response = self.client.get(
self.url,
format="json",
data={
"transaction": "foo",
"project": self.project.id,
"breakpoint": breakpoint_timestamp,
"start": self.now - timedelta(days=3),
"end": self.now,
},
)
assert response.status_code == 200, response.content
# The spans occur within 1 hour of the breakpoint, so they're ignored
# Before spans occur 1 hour before breakpoint
# After spans occur 1 hour after breakpoint
assert response.data == []
| OrganizationRootCauseAnalysisTest |
python | astropy__astropy | astropy/nddata/bitmask.py | {
"start": 5646,
"end": 27392
} | class ____(metaclass=BitFlagNameMeta):
"""
A base class for bit flag name maps used to describe data quality (DQ)
flags of images by provinding a mapping from a mnemonic flag name to a flag
value.
Mapping for a specific instrument should subclass this class.
Subclasses should define flags as class attributes with integer values
that are powers of 2. Each bit flag may also contain a string
comment following the flag value.
Examples
--------
>>> from astropy.nddata.bitmask import BitFlagNameMap
>>> class ST_DQ(BitFlagNameMap):
... __version__ = '1.0.0' # optional
... CR = 1, 'Cosmic Ray'
... CLOUDY = 4 # no docstring comment
... RAINY = 8, 'Dome closed'
...
>>> class ST_CAM1_DQ(ST_DQ):
... HOT = 16
... DEAD = 32
"""
def extend_bit_flag_map(cls_name, base_cls=BitFlagNameMap, **kwargs):
"""
A convenience function for creating bit flag maps by subclassing an
existing map and adding additional flags supplied as keyword arguments.
Parameters
----------
cls_name : str
Class name of the bit flag map to be created.
base_cls : BitFlagNameMap, optional
Base class for the new bit flag map.
**kwargs : int
Each supplied keyword argument will be used to define bit flag
names in the new map. In addition to bit flag names, ``__version__`` is
allowed to indicate the version of the newly created map.
Examples
--------
>>> from astropy.nddata.bitmask import extend_bit_flag_map
>>> ST_DQ = extend_bit_flag_map('ST_DQ', __version__='1.0.0', CR=1, CLOUDY=4, RAINY=8)
>>> ST_CAM1_DQ = extend_bit_flag_map('ST_CAM1_DQ', ST_DQ, HOT=16, DEAD=32)
>>> ST_CAM1_DQ['HOT'] # <-- Access flags as dictionary keys
16
>>> ST_CAM1_DQ.HOT # <-- Access flags as class attributes
16
"""
new_cls = BitFlagNameMeta.__new__(
BitFlagNameMeta, cls_name, (base_cls,), {"_locked": False}
)
for k, v in kwargs.items():
try:
setattr(new_cls, k, v)
except AttributeError as e:
if new_cls[k] != int(v):
raise e
new_cls._locked = True
return new_cls
def interpret_bit_flags(bit_flags, flip_bits=None, flag_name_map=None):
"""
Converts input bit flags to a single integer value (bit mask) or `None`.
When input is a list of flags (either a Python list of integer flags or a
string of comma-, ``'|'``-, or ``'+'``-separated list of flags),
the returned bit mask is obtained by summing input flags.
.. note::
In order to flip the bits of the returned bit mask,
for input of `str` type, prepend '~' to the input string. '~' must
be prepended to the *entire string* and not to each bit flag! For
input that is already a bit mask or a Python list of bit flags, set
``flip_bits`` for `True` in order to flip the bits of the returned
bit mask.
Parameters
----------
bit_flags : int, str, list, None
An integer bit mask or flag, `None`, a string of comma-, ``'|'``- or
``'+'``-separated list of integer bit flags or mnemonic flag names,
or a Python list of integer bit flags. If ``bit_flags`` is a `str`
and if it is prepended with '~', then the output bit mask will have
its bits flipped (compared to simple sum of input flags).
For input ``bit_flags`` that is already a bit mask or a Python list
of bit flags, bit-flipping can be controlled through ``flip_bits``
parameter.
.. note::
When ``bit_flags`` is a list of flag names, the ``flag_name_map``
parameter must be provided.
.. note::
Only one flag separator is supported at a time. ``bit_flags``
string should not mix ``','``, ``'+'``, and ``'|'`` separators.
flip_bits : bool, None
Indicates whether or not to flip the bits of the returned bit mask
obtained from input bit flags. This parameter must be set to `None`
when input ``bit_flags`` is either `None` or a Python list of flags.
flag_name_map : BitFlagNameMap
A `BitFlagNameMap` object that provides mapping from mnemonic
bit flag names to integer bit values in order to translate mnemonic
flags to numeric values when ``bit_flags`` is a comma- or
'+'-separated list of mnemonic bit flag names.
Returns
-------
bitmask : int or None
Returns an integer bit mask formed from the input bit value or `None`
if input ``bit_flags`` parameter is `None` or an empty string.
If input string value was prepended with '~' (or ``flip_bits`` was set
to `True`), then returned value will have its bits flipped
(inverse mask).
Examples
--------
>>> from astropy.nddata.bitmask import interpret_bit_flags, extend_bit_flag_map
>>> ST_DQ = extend_bit_flag_map('ST_DQ', CR=1, CLOUDY=4, RAINY=8, HOT=16, DEAD=32)
>>> "{0:016b}".format(0xFFFF & interpret_bit_flags(28))
'0000000000011100'
>>> "{0:016b}".format(0xFFFF & interpret_bit_flags('4,8,16'))
'0000000000011100'
>>> "{0:016b}".format(0xFFFF & interpret_bit_flags('CLOUDY,RAINY,HOT', flag_name_map=ST_DQ))
'0000000000011100'
>>> "{0:016b}".format(0xFFFF & interpret_bit_flags('~4,8,16'))
'1111111111100011'
>>> "{0:016b}".format(0xFFFF & interpret_bit_flags('~(4+8+16)'))
'1111111111100011'
>>> "{0:016b}".format(0xFFFF & interpret_bit_flags('~(CLOUDY+RAINY+HOT)',
... flag_name_map=ST_DQ))
'1111111111100011'
>>> "{0:016b}".format(0xFFFF & interpret_bit_flags([4, 8, 16]))
'0000000000011100'
>>> "{0:016b}".format(0xFFFF & interpret_bit_flags([4, 8, 16], flip_bits=True))
'1111111111100011'
"""
has_flip_bits = flip_bits is not None
flip_bits = bool(flip_bits)
allow_non_flags = False
if _is_int(bit_flags):
return ~int(bit_flags) if flip_bits else int(bit_flags)
elif bit_flags is None:
if has_flip_bits:
raise TypeError(
"Keyword argument 'flip_bits' must be set to 'None' when "
"input 'bit_flags' is None."
)
return None
elif isinstance(bit_flags, str):
if has_flip_bits:
raise TypeError(
"Keyword argument 'flip_bits' is not permitted for "
"comma-separated string lists of bit flags. Prepend '~' to "
"the string to indicate bit-flipping."
)
bit_flags = str(bit_flags).strip()
if bit_flags.upper() in ["", "NONE", "INDEF"]:
return None
# check whether bitwise-NOT is present and if it is, check that it is
# in the first position:
bitflip_pos = bit_flags.find("~")
if bitflip_pos == 0:
flip_bits = True
bit_flags = bit_flags[1:].lstrip()
else:
if bitflip_pos > 0:
raise ValueError("Bitwise-NOT must precede bit flag list.")
flip_bits = False
# basic check for correct use of parenthesis:
while True:
nlpar = bit_flags.count("(")
nrpar = bit_flags.count(")")
if nlpar == 0 and nrpar == 0:
break
if nlpar != nrpar:
raise ValueError("Unbalanced parentheses in bit flag list.")
lpar_pos = bit_flags.find("(")
rpar_pos = bit_flags.rfind(")")
if lpar_pos > 0 or rpar_pos < (len(bit_flags) - 1):
raise ValueError(
"Incorrect syntax (incorrect use of parenthesis) in bit flag list."
)
bit_flags = bit_flags[1:-1].strip()
if sum(k in bit_flags for k in "+,|") > 1:
raise ValueError(
"Only one type of bit flag separator may be used in one "
"expression. Allowed separators are: '+', '|', or ','."
)
if "," in bit_flags:
bit_flags = bit_flags.split(",")
elif "+" in bit_flags:
bit_flags = bit_flags.split("+")
elif "|" in bit_flags:
bit_flags = bit_flags.split("|")
else:
if bit_flags == "":
raise ValueError(
"Empty bit flag lists not allowed when either bitwise-NOT "
"or parenthesis are present."
)
bit_flags = [bit_flags]
bit_flags = [f.strip() for f in bit_flags]
if flag_name_map is not None:
try:
int(bit_flags[0])
except ValueError:
bit_flags = [flag_name_map[f] for f in bit_flags]
allow_non_flags = len(bit_flags) == 1
elif hasattr(bit_flags, "__iter__"):
if not all(_is_int(flag) for flag in bit_flags):
if flag_name_map is not None and all(
isinstance(flag, str) for flag in bit_flags
):
bit_flags = [flag_name_map[f] for f in bit_flags]
else:
raise TypeError(
"Every bit flag in a list must be either an "
"integer flag value or a 'str' flag name."
)
else:
raise TypeError("Unsupported type for argument 'bit_flags'.")
bitset = set(map(int, bit_flags))
if len(bitset) != len(bit_flags):
warnings.warn("Duplicate bit flags will be ignored")
bitmask = 0
for v in bitset:
if not _is_bit_flag(v) and not allow_non_flags:
raise ValueError(
f"Input list contains invalid (not powers of two) bit flag: {v}"
)
bitmask += v
if flip_bits:
bitmask = ~bitmask
return bitmask
def bitfield_to_boolean_mask(
bitfield,
ignore_flags=0,
flip_bits=None,
good_mask_value=False,
dtype=np.bool_,
flag_name_map=None,
):
"""
bitfield_to_boolean_mask(bitfield, ignore_flags=None, flip_bits=None, \
good_mask_value=False, dtype=numpy.bool_)
Converts an array of bit fields to a boolean (or integer) mask array
according to a bit mask constructed from the supplied bit flags (see
``ignore_flags`` parameter).
This function is particularly useful to convert data quality arrays to
boolean masks with selective filtering of DQ flags.
Parameters
----------
bitfield : ndarray
An array of bit flags. By default, values different from zero are
interpreted as "bad" values and values equal to zero are considered
as "good" values. However, see ``ignore_flags`` parameter on how to
selectively ignore some bits in the ``bitfield`` array data.
ignore_flags : int, str, list, None (default = 0)
An integer bit mask, `None`, a Python list of bit flags, a comma-,
or ``'|'``-separated, ``'+'``-separated string list of integer
bit flags or mnemonic flag names that indicate what bits in the input
``bitfield`` should be *ignored* (i.e., zeroed), or `None`.
.. note::
When ``bit_flags`` is a list of flag names, the ``flag_name_map``
parameter must be provided.
| Setting ``ignore_flags`` to `None` effectively will make
`bitfield_to_boolean_mask` interpret all ``bitfield`` elements
as "good" regardless of their value.
| When ``ignore_flags`` argument is an integer bit mask, it will be
combined using bitwise-NOT and bitwise-AND with each element of the
input ``bitfield`` array (``~ignore_flags & bitfield``). If the
resultant bitfield element is non-zero, that element will be
interpreted as a "bad" in the output boolean mask and it will be
interpreted as "good" otherwise. ``flip_bits`` parameter may be used
to flip the bits (``bitwise-NOT``) of the bit mask thus effectively
changing the meaning of the ``ignore_flags`` parameter from "ignore"
to "use only" these flags.
.. note::
Setting ``ignore_flags`` to 0 effectively will assume that all
non-zero elements in the input ``bitfield`` array are to be
interpreted as "bad".
| When ``ignore_flags`` argument is a Python list of integer bit
flags, these flags are added together to create an integer bit mask.
Each item in the list must be a flag, i.e., an integer that is an
integer power of 2. In order to flip the bits of the resultant
bit mask, use ``flip_bits`` parameter.
| Alternatively, ``ignore_flags`` may be a string of comma- or
``'+'``(or ``'|'``)-separated list of integer bit flags that should
be added (bitwise OR) together to create an integer bit mask.
For example, both ``'4,8'``, ``'4|8'``, and ``'4+8'`` are equivalent
and indicate that bit flags 4 and 8 in the input ``bitfield``
array should be ignored when generating boolean mask.
.. note::
``'None'``, ``'INDEF'``, and empty (or all white space) strings
are special values of string ``ignore_flags`` that are
interpreted as `None`.
.. note::
Each item in the list must be a flag, i.e., an integer that is an
integer power of 2. In addition, for convenience, an arbitrary
**single** integer is allowed and it will be interpreted as an
integer bit mask. For example, instead of ``'4,8'`` one could
simply provide string ``'12'``.
.. note::
Only one flag separator is supported at a time. ``ignore_flags``
string should not mix ``','``, ``'+'``, and ``'|'`` separators.
.. note::
When ``ignore_flags`` is a `str` and when it is prepended with
'~', then the meaning of ``ignore_flags`` parameters will be
reversed: now it will be interpreted as a list of bit flags to be
*used* (or *not ignored*) when deciding which elements of the
input ``bitfield`` array are "bad". Following this convention,
an ``ignore_flags`` string value of ``'~0'`` would be equivalent
to setting ``ignore_flags=None``.
.. warning::
Because prepending '~' to a string ``ignore_flags`` is equivalent
to setting ``flip_bits`` to `True`, ``flip_bits`` cannot be used
with string ``ignore_flags`` and it must be set to `None`.
flip_bits : bool, None (default = None)
Specifies whether or not to invert the bits of the bit mask either
supplied directly through the ``ignore_flags`` parameter or built from the
bit flags passed through ``ignore_flags`` (only when bit flags are
passed as Python lists of integer bit flags). Occasionally, it may be
useful to *consider only specific bit flags* in the ``bitfield``
array when creating a boolean mask as opposed to *ignoring* specific
bit flags as ``ignore_flags`` behaves by default. This can be achieved
by inverting/flipping the bits of the bit mask created from
``ignore_flags`` flags which effectively changes the meaning of the
``ignore_flags`` parameter from "ignore" to "use only" these flags.
Setting ``flip_bits`` to `None` means that no bit flipping will be
performed. Bit flipping for string lists of bit flags must be
specified by prepending '~' to string bit flag lists
(see documentation for ``ignore_flags`` for more details).
.. warning::
This parameter can be set to either `True` or `False` **ONLY** when
``ignore_flags`` is either an integer bit mask or a Python
list of integer bit flags. When ``ignore_flags`` is either
`None` or a string list of flags, ``flip_bits`` **MUST** be set
to `None`.
good_mask_value : int, bool (default = False)
This parameter is used to derive the values that will be assigned to
the elements in the output boolean mask array that correspond to the
"good" bit fields (that are 0 after zeroing bits specified by
``ignore_flags``) in the input ``bitfield`` array. When
``good_mask_value`` is non-zero or ``numpy.True_`` then values in the
output boolean mask array corresponding to "good" bit fields in
``bitfield`` will be ``numpy.True_`` (if ``dtype`` is ``numpy.bool_``)
or 1 (if ``dtype`` is of numerical type) and values corresponding
to "bad" flags will be ``numpy.False_`` (or 0). When
``good_mask_value`` is zero or ``numpy.False_`` then the values
in the output boolean mask array corresponding to "good" bit fields
in ``bitfield`` will be ``numpy.False_`` (if ``dtype`` is
``numpy.bool_``) or 0 (if ``dtype`` is of numerical type) and values
corresponding to "bad" flags will be ``numpy.True_`` (or 1).
dtype : data-type (default = ``numpy.bool_``)
The desired data-type for the output binary mask array.
flag_name_map : BitFlagNameMap
A `BitFlagNameMap` object that provides mapping from mnemonic
bit flag names to integer bit values in order to translate mnemonic
flags to numeric values when ``ignore_flags`` is a comma- or
'+'-separated list of mnemonic bit flag names.
Returns
-------
mask : ndarray
Returns an array of the same dimensionality as the input ``bitfield``
array whose elements can have two possible values,
e.g., ``numpy.True_`` or ``numpy.False_`` (or 1 or 0 for integer
``dtype``) according to the values of the input ``bitfield`` elements,
``ignore_flags`` parameter, and the ``good_mask_value`` parameter.
Examples
--------
>>> from astropy.nddata import bitmask
>>> import numpy as np
>>> dqarr = np.asarray([[0, 0, 1, 2, 0, 8, 12, 0],
... [10, 4, 0, 0, 0, 16, 6, 0]])
>>> flag_map = bitmask.extend_bit_flag_map(
... 'ST_DQ', CR=2, CLOUDY=4, RAINY=8, HOT=16, DEAD=32
... )
>>> bitmask.bitfield_to_boolean_mask(dqarr, ignore_flags=0,
... dtype=int)
array([[0, 0, 1, 1, 0, 1, 1, 0],
[1, 1, 0, 0, 0, 1, 1, 0]])
>>> bitmask.bitfield_to_boolean_mask(dqarr, ignore_flags=0,
... dtype=bool)
array([[False, False, True, True, False, True, True, False],
[ True, True, False, False, False, True, True, False]]...)
>>> bitmask.bitfield_to_boolean_mask(dqarr, ignore_flags=6,
... good_mask_value=0, dtype=int)
array([[0, 0, 1, 0, 0, 1, 1, 0],
[1, 0, 0, 0, 0, 1, 0, 0]])
>>> bitmask.bitfield_to_boolean_mask(dqarr, ignore_flags=~6,
... good_mask_value=0, dtype=int)
array([[0, 0, 0, 1, 0, 0, 1, 0],
[1, 1, 0, 0, 0, 0, 1, 0]])
>>> bitmask.bitfield_to_boolean_mask(dqarr, ignore_flags=6, dtype=int,
... flip_bits=True, good_mask_value=0)
array([[0, 0, 0, 1, 0, 0, 1, 0],
[1, 1, 0, 0, 0, 0, 1, 0]])
>>> bitmask.bitfield_to_boolean_mask(dqarr, ignore_flags='~(2+4)',
... good_mask_value=0, dtype=int)
array([[0, 0, 0, 1, 0, 0, 1, 0],
[1, 1, 0, 0, 0, 0, 1, 0]])
>>> bitmask.bitfield_to_boolean_mask(dqarr, ignore_flags=[2, 4],
... flip_bits=True, good_mask_value=0,
... dtype=int)
array([[0, 0, 0, 1, 0, 0, 1, 0],
[1, 1, 0, 0, 0, 0, 1, 0]])
>>> bitmask.bitfield_to_boolean_mask(dqarr, ignore_flags='~(CR,CLOUDY)',
... good_mask_value=0, dtype=int,
... flag_name_map=flag_map)
array([[0, 0, 0, 1, 0, 0, 1, 0],
[1, 1, 0, 0, 0, 0, 1, 0]])
>>> bitmask.bitfield_to_boolean_mask(dqarr, ignore_flags='~(CR+CLOUDY)',
... good_mask_value=0, dtype=int,
... flag_name_map=flag_map)
array([[0, 0, 0, 1, 0, 0, 1, 0],
[1, 1, 0, 0, 0, 0, 1, 0]])
"""
bitfield = np.asarray(bitfield)
if not np.issubdtype(bitfield.dtype, np.integer):
raise TypeError("Input bitfield array must be of integer type.")
ignore_mask = interpret_bit_flags(
ignore_flags, flip_bits=flip_bits, flag_name_map=flag_name_map
)
if ignore_mask is None:
if good_mask_value:
mask = np.ones_like(bitfield, dtype=dtype)
else:
mask = np.zeros_like(bitfield, dtype=dtype)
return mask
# filter out bits beyond the maximum supported by the data type:
ignore_mask = ignore_mask & _SUPPORTED_FLAGS
# invert the "ignore" mask:
ignore_mask = np.bitwise_not(
ignore_mask, dtype=bitfield.dtype.type, casting="unsafe"
)
mask = np.empty_like(bitfield, dtype=np.bool_)
np.bitwise_and(bitfield, ignore_mask, out=mask, casting="unsafe")
if good_mask_value:
np.logical_not(mask, out=mask)
return mask.astype(dtype=dtype, subok=False, copy=False)
| BitFlagNameMap |
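A minimal standalone sketch of the masking step performed above (NumPy only; the literal `ignore_mask` stands in for the integer that `interpret_bit_flags` builds):

import numpy as np

dq = np.array([0, 1, 2, 6, 8])        # data-quality bit fields
ignore_mask = 2 | 4                   # ignore bit flags 2 and 4
bad = (dq & ~ignore_mask) != 0        # non-zero after zeroing ignored bits -> "bad"
print(bad)                            # [False  True False False  True]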
python | astropy__astropy | astropy/units/structured.py | {
"start": 2027,
"end": 18577
} | class ____:
"""Container for units for a structured Quantity.
Parameters
----------
units : unit-like, tuple of unit-like, or `~astropy.units.StructuredUnit`
Tuples can be nested. If a `~astropy.units.StructuredUnit` is passed
in, it will be returned unchanged unless different names are requested.
names : tuple of str, tuple or list; `~numpy.dtype`; or `~astropy.units.StructuredUnit`, optional
Field names for the units, possibly nested. Can be inferred from a
structured `~numpy.dtype` or another `~astropy.units.StructuredUnit`.
For nested tuples, by default the name of the upper entry will be the
concatenation of the names of the lower levels. One can pass in a
list with the upper-level name and a tuple of lower-level names to
avoid this. For tuples, not all levels have to be given; for any level
not passed in, default field names of 'f0', 'f1', etc., will be used.
Notes
-----
It is recommended to initialize the class indirectly, using
`~astropy.units.Unit`. E.g., ``u.Unit('AU,AU/day')``.
When combined with a structured array to produce a structured
`~astropy.units.Quantity`, array field names will take precedence.
Generally, passing in ``names`` is needed only if the unit is used
unattached to a `~astropy.units.Quantity` and one needs to access its
fields.
Examples
--------
Various ways to initialize a `~astropy.units.StructuredUnit`::
>>> import astropy.units as u
>>> su = u.Unit('(AU,AU/day),yr')
>>> su
Unit("((AU, AU / d), yr)")
>>> su.field_names
(['f0', ('f0', 'f1')], 'f1')
>>> su['f1']
Unit("yr")
>>> su2 = u.StructuredUnit(((u.AU, u.AU/u.day), u.yr), names=(('p', 'v'), 't'))
>>> su2 == su
True
>>> su2.field_names
(['pv', ('p', 'v')], 't')
>>> su3 = u.StructuredUnit((su2['pv'], u.day), names=(['p_v', ('p', 'v')], 't'))
>>> su3.field_names
(['p_v', ('p', 'v')], 't')
>>> su3.keys()
('p_v', 't')
>>> su3.values()
(Unit("(AU, AU / d)"), Unit("d"))
Structured units share most methods with regular units::
>>> su.physical_type
astropy.units.structured.Structure((astropy.units.structured.Structure((PhysicalType('length'), PhysicalType({'speed', 'velocity'})), dtype=[('f0', 'O'), ('f1', 'O')]), PhysicalType('time')), dtype=[('f0', 'O'), ('f1', 'O')])
>>> su.si
Unit("((1.49598e+11 m, 1.73146e+06 m / s), 3.15576e+07 s)")
"""
def __new__(cls, units, names=None):
dtype = None
if names is not None:
if isinstance(names, StructuredUnit):
dtype = names._units.dtype
names = names.field_names
elif isinstance(names, np.dtype):
if not names.fields:
raise ValueError("dtype should be structured, with fields.")
dtype = np.dtype([(name, DTYPE_OBJECT) for name in names.names])
names = _names_from_dtype(names)
else:
if not isinstance(names, tuple):
names = (names,)
names = _normalize_names(names)
if not isinstance(units, tuple):
units = Unit(units)
if isinstance(units, StructuredUnit):
# Avoid constructing a new StructuredUnit if no field names
# are given, or if all field names are the same already anyway.
if names is None or units.field_names == names:
return units
# Otherwise, turn (the upper level) into a tuple, for renaming.
units = units.values()
else:
# Single regular unit: make a tuple for iteration below.
units = (units,)
if names is None:
names = tuple(f"f{i}" for i in range(len(units)))
elif len(units) != len(names):
raise ValueError("lengths of units and field names must match.")
converted = []
for unit, name in zip(units, names):
if isinstance(name, list):
# For list, the first item is the name of our level,
# and the second another tuple of names, i.e., we recurse.
unit = cls(unit, name[1])
name = name[0]
else:
# We are at the lowest level. Check unit.
unit = Unit(unit)
if dtype is not None and isinstance(unit, StructuredUnit):
raise ValueError(
"units do not match in depth with field "
"names from dtype or structured unit."
)
converted.append(unit)
self = super().__new__(cls)
if dtype is None:
dtype = np.dtype(
[
((name[0] if isinstance(name, list) else name), DTYPE_OBJECT)
for name in names
]
)
# Decay array to void so we can access by field name and number.
self._units = np.array(tuple(converted), dtype)[()]
return self
def __getnewargs__(self):
"""When de-serializing, e.g. pickle, start with a blank structure."""
return (), None
@property
def field_names(self):
"""Possibly nested tuple of the field names of the parts."""
return tuple(
([name, unit.field_names] if isinstance(unit, StructuredUnit) else name)
for name, unit in self.items()
)
# Allow StructuredUnit to be treated as an (ordered) mapping.
def __len__(self):
return len(self._units.dtype.names)
def __getitem__(self, item):
# Since we are based on np.void, indexing by field number works too.
return self._units[item]
def values(self):
return self._units.item()
def keys(self):
return self._units.dtype.names
def items(self):
return tuple(zip(self._units.dtype.names, self._units.item()))
def __iter__(self):
yield from self._units.dtype.names
# Helpers for methods below.
def _recursively_apply(self, func, cls=None):
"""Apply func recursively.
Parameters
----------
func : callable
Function to apply to all parts of the structured unit,
recursing as needed.
cls : type, optional
If given, should be a subclass of `~numpy.void`. By default,
will return a new `~astropy.units.StructuredUnit` instance.
"""
results = np.void(tuple(map(func, self.values())), self._units.dtype)
if cls is not None:
return results.view((cls, results.dtype))
# Short-cut; no need to interpret field names, etc.
result = super().__new__(self.__class__)
result._units = results
return result
def _recursively_get_dtype(self, value, enter_lists=True):
"""Get structured dtype according to value, using our field names.
This is useful since ``np.array(value)`` would treat tuples as lower
levels of the array, rather than as elements of a structured array.
The routine does presume that the type of the first tuple is
representative of the rest. Used in ``get_converter``.
For the special value of ``UNITY``, all fields are assumed to be 1.0,
and hence this will return an all-float dtype.
"""
if enter_lists:
while isinstance(value, list):
value = value[0]
if value is UNITY:
value = (UNITY,) * len(self)
elif not isinstance(value, tuple) or len(self) != len(value):
raise ValueError(f"cannot interpret value {value} for unit {self}.")
descr = []
for (name, unit), part in zip(self.items(), value):
if isinstance(unit, StructuredUnit):
descr.append(
(name, unit._recursively_get_dtype(part, enter_lists=False))
)
else:
# Got a part associated with a regular unit. Gets its dtype.
# Like for Quantity, we cast integers to float.
part = np.array(part)
part_dtype = part.dtype
if part_dtype.kind in "iu":
part_dtype = np.dtype(float)
descr.append((name, part_dtype, part.shape))
return np.dtype(descr)
@property
def si(self):
"""The `StructuredUnit` instance in SI units."""
return self._recursively_apply(operator.attrgetter("si"))
@property
def cgs(self):
"""The `StructuredUnit` instance in cgs units."""
return self._recursively_apply(operator.attrgetter("cgs"))
# Needed to pass through Unit initializer, so might as well use it.
@cached_property
def _physical_type_id(self):
return self._recursively_apply(
operator.attrgetter("_physical_type_id"), cls=Structure
)
@property
def physical_type(self):
"""Physical types of all the fields."""
return self._recursively_apply(
operator.attrgetter("physical_type"), cls=Structure
)
def decompose(self, bases: Collection[UnitBase] = ()) -> Self:
"""The `StructuredUnit` composed of only irreducible units.
Parameters
----------
bases : sequence of `~astropy.units.UnitBase`, optional
The bases to decompose into. When not provided,
decomposes down to any irreducible units. When provided,
the decomposed result will only contain the given units.
This will raises a `UnitsError` if it's not possible
to do so.
Returns
-------
`~astropy.units.StructuredUnit`
With the unit for each field containing only irreducible units.
"""
return self._recursively_apply(operator.methodcaller("decompose", bases=bases))
def is_equivalent(self, other, equivalencies=[]):
"""`True` if all fields are equivalent to the other's fields.
Parameters
----------
other : `~astropy.units.StructuredUnit`
The structured unit to compare with, or what can initialize one.
equivalencies : list of tuple, optional
A list of equivalence pairs to try if the units are not
directly convertible. See :ref:`unit_equivalencies`.
The list will be applied to all fields.
Returns
-------
bool
"""
try:
other = StructuredUnit(other)
except Exception:
return False
if len(self) != len(other):
return False
for self_part, other_part in zip(self.values(), other.values()):
if not self_part.is_equivalent(other_part, equivalencies=equivalencies):
return False
return True
def get_converter(self, other, equivalencies=[]):
if not isinstance(other, type(self)):
other = self.__class__(other, names=self)
converters = [
self_part.get_converter(other_part, equivalencies=equivalencies)
for (self_part, other_part) in zip(self.values(), other.values())
]
def converter(value):
if not hasattr(value, "dtype"):
value = np.array(value, self._recursively_get_dtype(value))
result = np.empty_like(value)
for name, converter_ in zip(result.dtype.names, converters):
result[name] = converter_(value[name])
# Index with empty tuple to decay array scalars to numpy void.
return result if result.shape else result[()]
return converter
get_converter.__doc__ = UnitBase.get_converter.__doc__
def to(self, other, value=np._NoValue, equivalencies=[]):
"""Return values converted to the specified unit.
Parameters
----------
other : `~astropy.units.StructuredUnit`
The unit to convert to. If necessary, will be converted to
a `~astropy.units.StructuredUnit` using the dtype of ``value``.
value : array-like, optional
Value(s) in the current unit to be converted to the
specified unit. If a sequence, the first element must have
entries of the correct type to represent all elements (i.e.,
not have, e.g., a ``float`` where other elements have ``complex``).
If not given, assumed to have 1. in all fields.
equivalencies : list of tuple, optional
A list of equivalence pairs to try if the units are not
directly convertible. See :ref:`unit_equivalencies`.
This list is in addition to possible global defaults set by, e.g.,
`set_enabled_equivalencies`.
Use `None` to turn off all equivalencies.
Returns
-------
values : scalar or array
Converted value(s).
Raises
------
UnitsError
If units are inconsistent
"""
if value is np._NoValue:
# We do not have UNITY as a default, since then the docstring
# would list 1.0 as default, yet one could not pass that in.
value = UNITY
return self.get_converter(other, equivalencies=equivalencies)(value)
def to_string(self, format="generic"):
"""Output the unit in the given format as a string.
Units are separated by commas.
Parameters
----------
format : `astropy.units.format.Base` subclass or str
The name of a format or a formatter class. If not
provided, defaults to the generic format.
Notes
-----
Structured units can be written to all formats, but can be
re-read only with 'generic'.
"""
parts = [part.to_string(format) for part in self.values()]
out_fmt = "({})" if len(self) > 1 else "({},)"
if format.startswith("latex"):
# Strip $ from parts and add them on the outside.
parts = [part[1:-1] for part in parts]
out_fmt = "$" + out_fmt + "$"
return out_fmt.format(", ".join(parts))
def _repr_latex_(self):
return self.to_string("latex")
__array_ufunc__ = None
def __mul__(self, other):
if isinstance(other, str):
try:
other = Unit(other, parse_strict="silent")
except Exception:
return NotImplemented
if isinstance(other, UnitBase):
new_units = tuple(part * other for part in self.values())
return self.__class__(new_units, names=self)
if isinstance(other, StructuredUnit):
return NotImplemented
# Anything not like a unit, try initialising as a structured quantity.
try:
from .quantity import Quantity
return Quantity(other, unit=self)
except Exception:
return NotImplemented
def __rmul__(self, other):
return self.__mul__(other)
def __truediv__(self, other):
if isinstance(other, str):
try:
other = Unit(other, parse_strict="silent")
except Exception:
return NotImplemented
if isinstance(other, UnitBase):
new_units = tuple(part / other for part in self.values())
return self.__class__(new_units, names=self)
return NotImplemented
def __rlshift__(self, m):
try:
from .quantity import Quantity
return Quantity(m, self, copy=False, subok=True)
except Exception:
return NotImplemented
def __str__(self):
return self.to_string()
def __repr__(self):
return f'Unit("{self.to_string()}")'
def __hash__(self):
return hash(self.values())
def __eq__(self, other):
try:
other = StructuredUnit(other)
except Exception:
return NotImplemented
return self.values() == other.values()
def __ne__(self, other):
if not isinstance(other, type(self)):
try:
other = StructuredUnit(other)
except Exception:
return NotImplemented
return self.values() != other.values()
| StructuredUnit |
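A short usage sketch for the class above; the conversion target and values are illustrative, and the result is a structured NumPy scalar:

import astropy.units as u

su = u.Unit('(AU,AU/day),yr')         # nested structured unit
print(su.si)                          # each field converted to SI
# Convert a matching nested value tuple; the dtype is inferred from the fields.
print(su.to(((u.km, u.km / u.s), u.s), ((1.0, 1.0), 1.0)))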
python | huggingface__transformers | src/transformers/models/efficientloftr/modeling_efficientloftr.py | {
"start": 30916,
"end": 39214
} | class ____(EfficientLoFTRPreTrainedModel):
def __init__(self, config: EfficientLoFTRConfig):
super().__init__(config)
self.config = config
self.backbone = EfficientLoFTRepVGG(config)
self.local_feature_transformer = EfficientLoFTRLocalFeatureTransformer(config)
self.rotary_emb = EfficientLoFTRRotaryEmbedding(config=config)
self.post_init()
@check_model_inputs()
@auto_docstring
def forward(
self,
pixel_values: torch.FloatTensor,
labels: Optional[torch.LongTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> BackboneOutput:
r"""
Examples:
```python
>>> from transformers import AutoImageProcessor, AutoModel
>>> import torch
>>> from PIL import Image
>>> import requests
>>> url = "https://github.com/magicleap/SuperGluePretrainedNetwork/blob/master/assets/phototourism_sample_images/london_bridge_78916675_4568141288.jpg?raw=true"
>>> image1 = Image.open(requests.get(url, stream=True).raw)
>>> url = "https://github.com/magicleap/SuperGluePretrainedNetwork/blob/master/assets/phototourism_sample_images/london_bridge_19481797_2295892421.jpg?raw=true"
>>> image2 = Image.open(requests.get(url, stream=True).raw)
>>> images = [image1, image2]
>>> processor = AutoImageProcessor.from_pretrained("zju-community/efficient_loftr")
>>> model = AutoModel.from_pretrained("zju-community/efficient_loftr")
>>> with torch.no_grad():
...     inputs = processor(images, return_tensors="pt")
...     outputs = model(**inputs)
```"""
if labels is not None:
raise ValueError("EfficientLoFTR is not trainable, no labels should be provided.")
if pixel_values.ndim != 5 or pixel_values.size(1) != 2:
raise ValueError("Input must be a 5D tensor of shape (batch_size, 2, num_channels, height, width)")
batch_size, _, channels, height, width = pixel_values.shape
pixel_values = pixel_values.reshape(batch_size * 2, channels, height, width)
pixel_values = self.extract_one_channel_pixel_values(pixel_values)
# 1. Local Feature CNN
features = self.backbone(pixel_values)
# Last stage outputs are coarse outputs
coarse_features = features[-1]
# Rest is residual features used in EfficientLoFTRFineFusionLayer
residual_features = features[:-1]
coarse_embed_dim, coarse_height, coarse_width = coarse_features.shape[-3:]
# 2. Coarse-level LoFTR module
cos, sin = self.rotary_emb(coarse_features)
cos = cos.expand(batch_size * 2, -1, -1, -1).reshape(batch_size * 2, -1, coarse_embed_dim)
sin = sin.expand(batch_size * 2, -1, -1, -1).reshape(batch_size * 2, -1, coarse_embed_dim)
position_embeddings = (cos, sin)
coarse_features = coarse_features.reshape(batch_size, 2, coarse_embed_dim, coarse_height, coarse_width)
coarse_features = self.local_feature_transformer(
coarse_features, position_embeddings=position_embeddings, **kwargs
)
features = (coarse_features,) + tuple(residual_features)
return BackboneOutput(feature_maps=features)
def mask_border(tensor: torch.Tensor, border_margin: int, value: Union[bool, float, int]) -> torch.Tensor:
"""
Mask a tensor border with a given value
Args:
tensor (`torch.Tensor` of shape `(batch_size, height_0, width_0, height_1, width_1)`):
The tensor to mask
border_margin (`int`) :
The size of the border
value (`Union[bool, int, float]`):
The value to place in the tensor's borders
Returns:
tensor (`torch.Tensor` of shape `(batch_size, height_0, width_0, height_1, width_1)`):
The masked tensor
"""
if border_margin <= 0:
return tensor
tensor[:, :border_margin] = value
tensor[:, :, :border_margin] = value
tensor[:, :, :, :border_margin] = value
tensor[:, :, :, :, :border_margin] = value
tensor[:, -border_margin:] = value
tensor[:, :, -border_margin:] = value
tensor[:, :, :, -border_margin:] = value
tensor[:, :, :, :, -border_margin:] = value
return tensor
def create_meshgrid(
height: Union[int, torch.Tensor],
width: Union[int, torch.Tensor],
normalized_coordinates: bool = False,
device: Optional[torch.device] = None,
dtype: Optional[torch.dtype] = None,
) -> torch.Tensor:
"""
Copied from kornia library : kornia/kornia/utils/grid.py:26
Generate a coordinate grid for an image.
When the flag ``normalized_coordinates`` is set to True, the grid is
normalized to be in the range :math:`[-1,1]` to be consistent with the pytorch
function :py:func:`torch.nn.functional.grid_sample`.
Args:
height (`int`):
The image height (rows).
width (`int`):
The image width (cols).
normalized_coordinates (`bool`):
Whether to normalize coordinates in the range :math:`[-1,1]` in order to be consistent with the
PyTorch function :py:func:`torch.nn.functional.grid_sample`.
device (`torch.device`):
The device on which the grid will be generated.
dtype (`torch.dtype`):
The data type of the generated grid.
Return:
grid (`torch.Tensor` of shape `(1, height, width, 2)`):
The grid tensor.
Example:
>>> create_meshgrid(2, 2, normalized_coordinates=True)
tensor([[[[-1., -1.],
[ 1., -1.]],
<BLANKLINE>
[[-1., 1.],
[ 1., 1.]]]])
>>> create_meshgrid(2, 2, normalized_coordinates=False)
tensor([[[[0., 0.],
[1., 0.]],
<BLANKLINE>
[[0., 1.],
[1., 1.]]]])
"""
xs = torch.linspace(0, width - 1, width, device=device, dtype=dtype)
ys = torch.linspace(0, height - 1, height, device=device, dtype=dtype)
if normalized_coordinates:
xs = (xs / (width - 1) - 0.5) * 2
ys = (ys / (height - 1) - 0.5) * 2
grid = torch.stack(torch.meshgrid(ys, xs, indexing="ij"), dim=-1)
grid = grid.permute(1, 0, 2).unsqueeze(0)
return grid
def spatial_expectation2d(input: torch.Tensor, normalized_coordinates: bool = True) -> torch.Tensor:
r"""
Copied from kornia library : kornia/geometry/subpix/dsnt.py:76
Compute the expectation of coordinate values using spatial probabilities.
The input heatmap is assumed to represent a valid spatial probability distribution,
which can be achieved using :func:`~kornia.geometry.subpixel.spatial_softmax2d`.
Args:
input (`torch.Tensor` of shape `(batch_size, embed_dim, height, width)`):
The input tensor representing dense spatial probabilities.
normalized_coordinates (`bool`):
Whether to return the coordinates normalized in the range of :math:`[-1, 1]`. Otherwise, it will return
the coordinates in the range of the input shape.
Returns:
output (`torch.Tensor` of shape `(batch_size, embed_dim, 2)`)
Expected value of the 2D coordinates. Output order of the coordinates is (x, y).
Examples:
>>> heatmaps = torch.tensor([[[
... [0., 0., 0.],
... [0., 0., 0.],
... [0., 1., 0.]]]])
>>> spatial_expectation2d(heatmaps, False)
tensor([[[1., 2.]]])
"""
batch_size, embed_dim, height, width = input.shape
# Create coordinates grid.
grid = create_meshgrid(height, width, normalized_coordinates, input.device)
grid = grid.to(input.dtype)
pos_x = grid[..., 0].reshape(-1)
pos_y = grid[..., 1].reshape(-1)
input_flat = input.view(batch_size, embed_dim, -1)
# Compute the expectation of the coordinates.
expected_y = torch.sum(pos_y * input_flat, -1, keepdim=True)
expected_x = torch.sum(pos_x * input_flat, -1, keepdim=True)
output = torch.cat([expected_x, expected_y], -1)
return output.view(batch_size, embed_dim, 2)
@auto_docstring(
custom_intro="""
EfficientLoFTR model taking images as inputs and outputting the matching of them.
"""
)
| EfficientLoFTRModel |
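A quick sketch of the soft-argmax idea that `spatial_expectation2d` above implements, reusing its own doctest values:

import torch

heat = torch.zeros(1, 1, 3, 3)
heat[0, 0, 2, 1] = 1.0                # all probability mass at (x=1, y=2)
print(spatial_expectation2d(heat, normalized_coordinates=False))
# tensor([[[1., 2.]]])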
python | django__django | tests/model_options/test_default_pk.py | {
"start": 273,
"end": 4676
} | class ____(SimpleTestCase):
def test_default_value_of_default_auto_field_setting(self):
"""django.conf.global_settings defaults to BigAutoField."""
class MyModel(models.Model):
pass
self.assertIsInstance(MyModel._meta.pk, models.BigAutoField)
@override_settings(DEFAULT_AUTO_FIELD="django.db.models.NonexistentAutoField")
def test_default_auto_field_setting_nonexistent(self):
msg = (
"DEFAULT_AUTO_FIELD refers to the module "
"'django.db.models.NonexistentAutoField' that could not be "
"imported."
)
with self.assertRaisesMessage(ImproperlyConfigured, msg):
class Model(models.Model):
pass
@isolate_apps("model_options.apps.ModelPKNonexistentConfig")
def test_app_default_auto_field_nonexistent(self):
msg = (
"model_options.apps.ModelPKNonexistentConfig.default_auto_field "
"refers to the module 'django.db.models.NonexistentAutoField' "
"that could not be imported."
)
with self.assertRaisesMessage(ImproperlyConfigured, msg):
class Model(models.Model):
pass
@override_settings(DEFAULT_AUTO_FIELD="django.db.models.TextField")
def test_default_auto_field_setting_non_auto(self):
msg = (
"Primary key 'django.db.models.TextField' referred by "
"DEFAULT_AUTO_FIELD must subclass AutoField."
)
with self.assertRaisesMessage(ValueError, msg):
class Model(models.Model):
pass
@isolate_apps("model_options.apps.ModelPKNonAutoConfig")
def test_app_default_auto_field_non_auto(self):
msg = (
"Primary key 'django.db.models.TextField' referred by "
"model_options.apps.ModelPKNonAutoConfig.default_auto_field must "
"subclass AutoField."
)
with self.assertRaisesMessage(ValueError, msg):
class Model(models.Model):
pass
@override_settings(DEFAULT_AUTO_FIELD=None)
def test_default_auto_field_setting_none(self):
msg = "DEFAULT_AUTO_FIELD must not be empty."
with self.assertRaisesMessage(ImproperlyConfigured, msg):
class Model(models.Model):
pass
@isolate_apps("model_options.apps.ModelPKNoneConfig")
def test_app_default_auto_field_none(self):
msg = (
"model_options.apps.ModelPKNoneConfig.default_auto_field must not "
"be empty."
)
with self.assertRaisesMessage(ImproperlyConfigured, msg):
class Model(models.Model):
pass
@isolate_apps("model_options.apps.ModelDefaultPKConfig")
@override_settings(DEFAULT_AUTO_FIELD="django.db.models.SmallAutoField")
def test_default_auto_field_setting(self):
class Model(models.Model):
pass
self.assertIsInstance(Model._meta.pk, models.SmallAutoField)
@override_settings(
DEFAULT_AUTO_FIELD="model_options.test_default_pk.MyBigAutoField"
)
def test_default_auto_field_setting_bigautofield_subclass(self):
class Model(models.Model):
pass
self.assertIsInstance(Model._meta.pk, MyBigAutoField)
@isolate_apps("model_options.apps.ModelPKConfig")
@override_settings(DEFAULT_AUTO_FIELD="django.db.models.AutoField")
def test_app_default_auto_field(self):
class Model(models.Model):
pass
self.assertIsInstance(Model._meta.pk, models.SmallAutoField)
@isolate_apps("model_options.apps.ModelDefaultPKConfig")
@override_settings(DEFAULT_AUTO_FIELD="django.db.models.SmallAutoField")
def test_m2m_default_auto_field_setting(self):
class M2MModel(models.Model):
m2m = models.ManyToManyField("self")
m2m_pk = M2MModel._meta.get_field("m2m").remote_field.through._meta.pk
self.assertIsInstance(m2m_pk, models.SmallAutoField)
@isolate_apps("model_options.apps.ModelPKConfig")
@override_settings(DEFAULT_AUTO_FIELD="django.db.models.AutoField")
def test_m2m_app_default_auto_field(self):
class M2MModel(models.Model):
m2m = models.ManyToManyField("self")
m2m_pk = M2MModel._meta.get_field("m2m").remote_field.through._meta.pk
self.assertIsInstance(m2m_pk, models.SmallAutoField)
| TestDefaultPK |
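The behavior the tests above exercise comes down to two settings; a sketch (the app and module names are hypothetical):

# settings.py -- project-wide default primary-key type
DEFAULT_AUTO_FIELD = "django.db.models.BigAutoField"

# apps.py -- per-app override, which takes precedence for that app's models
from django.apps import AppConfig

class ShopConfig(AppConfig):
    default_auto_field = "django.db.models.SmallAutoField"
    name = "shop"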
python | dagster-io__dagster | python_modules/libraries/dagster-aws/dagster_aws/ssm/resources.py | {
"start": 769,
"end": 4445
} | class ____(ResourceWithBoto3Configuration):
"""Resource that gives access to AWS Systems Manager Parameter Store.
The underlying Parameter Store session is created by calling
:py:func:`boto3.session.Session(profile_name) <boto3:boto3.session>`.
The returned resource object is a Systems Manager client, an instance of `botocore.client.ssm`.
Example:
.. code-block:: python
from typing import Any
from dagster import build_op_context, job, op
from dagster_aws.ssm import SSMResource
@op
def example_ssm_op(ssm: SSMResource):
return ssm.get_client().get_parameter(
Name="a_parameter"
)
@job
def example_job():
example_ssm_op()
Definitions(
jobs=[example_job],
resources={
'ssm': SSMResource(
region_name='us-west-1'
)
}
)
"""
@classmethod
def _is_dagster_maintained(cls) -> bool:
return True
def get_client(self) -> "botocore.client.ssm": # pyright: ignore (reportAttributeAccessIssue)
return construct_ssm_client(
max_attempts=self.max_attempts,
region_name=self.region_name,
profile_name=self.profile_name,
endpoint_url=self.endpoint_url,
use_ssl=self.use_ssl,
verify=self.verify,
aws_access_key_id=self.aws_access_key_id,
aws_secret_access_key=self.aws_secret_access_key,
aws_session_token=self.aws_session_token,
)
@beta
@dagster_maintained_resource
@resource(config_schema=SSMResource.to_config_schema())
def ssm_resource(context) -> "botocore.client.ssm": # pyright: ignore (reportAttributeAccessIssue)
"""Resource that gives access to AWS Systems Manager Parameter Store.
The underlying Parameter Store session is created by calling
:py:func:`boto3.session.Session(profile_name) <boto3:boto3.session>`.
The returned resource object is a Systems Manager client, an instance of `botocore.client.ssm`.
Example:
.. code-block:: python
from dagster import build_op_context, job, op
from dagster_aws.ssm import ssm_resource
@op(required_resource_keys={'ssm'})
def example_ssm_op(context):
return context.resources.ssm.get_parameter(
Name="a_parameter"
)
@job(resource_defs={'ssm': ssm_resource})
def example_job():
example_ssm_op()
example_job.execute_in_process(
run_config={
'resources': {
'ssm': {
'config': {
'region_name': 'us-west-1',
}
}
}
}
)
You may configure this resource as follows:
.. code-block:: YAML
resources:
parameter_store:
config:
region_name: "us-west-1"
# Optional[str]: Specifies a custom region for the Parameter Store session. Default is chosen
# through the ordinary boto credential chain.
profile_name: "dev"
# Optional[str]: Specifies a custom profile for Parameter Store session. Default is default
# profile as specified in ~/.aws/credentials file
"""
return SSMResource.from_resource_context(context).get_client()
@beta
| SSMResource |
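A sketch of fetching one parameter with the resource above; the parameter name is hypothetical, while `get_parameter` and `WithDecryption` are standard boto3 SSM API:

from dagster_aws.ssm import SSMResource

ssm = SSMResource(region_name="us-west-1")
client = ssm.get_client()
value = client.get_parameter(Name="/my/app/db_password", WithDecryption=True)[
    "Parameter"
]["Value"]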
python | Netflix__metaflow | test/test_config/card_config.py | {
"start": 64,
"end": 400
} | class ____(FlowSpec):
config = Config("config", default_value="")
@card(type=config.type)
@step
def start(self):
print("card type", self.config.type)
self.next(self.end)
@step
def end(self):
print("full config", self.config)
if __name__ == "__main__":
CardConfigFlow()
| CardConfigFlow |
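One plausible way to supply the Config the flow above reads, using Metaflow's config options; the JSON shape is an assumption matching `self.config.type`:

# python card_config.py --config-value config '{"type": "blank"}' run
# or, reading the same JSON from a file:
# python card_config.py --config config config.json run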
python | great-expectations__great_expectations | tests/datasource/fluent/data_asset/test_sql_asset.py | {
"start": 2991,
"end": 3285
} | class ____:
def __init__(self, queried_row: Any):
self._queried_row = queried_row
def execute(self, *args, **kwargs):
return FakeResult(self._queried_row)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb): ...
| FakeConnection |
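A usage sketch for the test double above; `execute` ignores its arguments and always wraps the injected row:

conn = FakeConnection(queried_row=(1, "2024-01-01"))
with conn as c:
    result = c.execute("SELECT ...")  # returns FakeResult(self._queried_row)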
python | getsentry__sentry | tests/sentry/relocation/tasks/test_transfer.py | {
"start": 7757,
"end": 9656
} | class ____(TestCase):
def test_missing_transfer(self) -> None:
res = process_relocation_transfer_region(transfer_id=999)
assert res is None
def test_transfer_request_state(self) -> None:
transfer = create_region_relocation_transfer(
organization=self.organization,
state=RelocationTransferState.Request,
)
process_relocation_transfer_region(transfer_id=transfer.id)
# Should be removed as something has gone off the rails
assert not RegionRelocationTransfer.objects.filter(id=transfer.id).exists()
def test_transfer_reply_state(self) -> None:
organization = self.organization
relocation = Relocation.objects.create(
creator_id=self.user.id,
owner_id=self.user.id,
want_org_slugs=["acme-org"],
step=Relocation.Step.UPLOADING.value,
)
transfer = create_region_relocation_transfer(
organization=organization,
relocation_uuid=relocation.uuid,
state=RelocationTransferState.Reply,
)
relocation_storage = get_relocation_storage()
relocation_storage.save(
f"runs/{relocation.uuid}/saas_to_saas_export/{organization.slug}.tar",
BytesIO(b"export data"),
)
process_relocation_transfer_region(transfer_id=transfer.id)
# Should be removed on completion.
assert not RegionRelocationTransfer.objects.filter(id=transfer.id).exists()
with assume_test_silo_mode(SiloMode.CONTROL):
assert ControlRelocationTransfer.objects.filter(
state=RelocationTransferState.Reply,
org_slug=organization.slug,
exporting_region=transfer.exporting_region,
requesting_region=transfer.requesting_region,
).exists()
| ProcessRelocationTransferRegionTest |
python | getsentry__sentry | src/sentry/testutils/cases.py | {
"start": 101138,
"end": 101722
} | class ____(APITestCase):
provider = "dummy"
def setUp(self):
super().setUp()
with assume_test_silo_mode(SiloMode.CONTROL):
self.auth_provider_inst = AuthProviderModel(
organization_id=self.organization.id, provider=self.provider
)
self.auth_provider_inst.enable_scim(self.user)
self.auth_provider_inst.save()
self.scim_user = ApiToken.objects.get(
token=self.auth_provider_inst.get_scim_token()
).user
self.login_as(user=self.scim_user)
| SCIMTestCase |
python | patrick-kidger__equinox | equinox/_ad.py | {
"start": 19849,
"end": 25834
} | class ____(Module):
# Important that `jaxpr` be a leaf (and not static), so that it is a tuple element
# when passing through `filter_primitive_bind` and thus visible to
# `jax.core.subjaxprs`
jaxpr: jax.extend.core.Jaxpr
consts: PyTree[ArrayLike] # Captured in the PyTree structure of _ClosureConvert
in_dynamic_struct: _FlatPyTree[jax.ShapeDtypeStruct] = field(static=True)
out_dynamic_struct: _FlatPyTree[jax.ShapeDtypeStruct] = field(static=True)
in_static: _FlatPyTree[Any] = field(static=True)
out_static: _FlatPyTree[Any] = field(static=True)
@property
def in_struct(self):
dynamic = _unflatten(self.in_dynamic_struct)
static = _unflatten(self.in_static)
return combine(dynamic, static)
@property
def out_struct(self):
dynamic = _unflatten(self.out_dynamic_struct)
static = _unflatten(self.out_static)
return combine(dynamic, static)
def __call__(self, *args, **kwargs):
in_dynamic = _check_closure_convert_input(self, args, kwargs)
in_dynamic_flat = jtu.tree_leaves(in_dynamic)
out_dynamic_flat = jax.core.eval_jaxpr(
self.jaxpr, self.consts, *in_dynamic_flat
)
self_out_dynamic_struct = _unflatten(self.out_dynamic_struct)
self_out_static = _unflatten(self.out_static)
out_dynamic_struct_flat, out_dynamic_treedef = jtu.tree_flatten(
self_out_dynamic_struct
)
assert len(out_dynamic_flat) == len(out_dynamic_struct_flat)
for o1, o2 in zip(out_dynamic_flat, out_dynamic_struct_flat):
assert jnp.shape(o1) == o2.shape
assert jnp.result_type(o1) == jnp.result_type(o2)
out = jtu.tree_unflatten(out_dynamic_treedef, out_dynamic_flat)
out = combine(out, self_out_static)
return out
def filter_closure_convert(fn: Callable[_P, _T], *args, **kwargs) -> Callable[_P, _T]:
"""As `jax.closure_convert`, but works on functions accepting and returning
arbitrary PyTree objects. In addition, all JAX arrays are hoisted into constants
(not just floating point arrays).
This is useful for explicitly capturing any closed-over JAX tracers
before crossing an API boundary, such as `jax.grad`, `jax.custom_vjp`, or the
rule of a custom primitive.
**Arguments:**
- `fn`: The function to call. Will be called as `fun(*args, **kwargs)`.
- `args`, `kwargs`: Example arguments at which to call the function. The function is
not actually evaluated on these arguments; all JAX arrays are substituted for
tracers. Note that Python builtins (`bool`, `int`, `float`, `complex`) are
not substituted for tracers and are passed through as-is.
**Returns:**
A new function, which can be called in the same way, using `*args` and `**kwargs`.
Will contain all closed-over tracers of `fn` as part of its PyTree structure.
!!! Example
```python
@jax.grad
def f(x, y):
z = x + y
g = lambda a: z + a # closes over z
g2 = filter_closure_convert(g, 1)
assert [id(b) for b in g2.consts] == [id(z)]
return z
f(1., 1.)
```
"""
in_dynamic, in_static = partition((args, kwargs), _is_struct)
# Strip `weak_dtype`. This didn't used to exist on `jax.ShapeDtypeStruct`, and then
# got added: https://github.com/patrick-kidger/equinox/issues/854
#
# If we were writing from scratch then we'd keep this in, but for backward
# compatibility we instead strip it and treat every dtype as non-weak.
#
# Note that there are *two* kinds of backward compatibility we're thinking about
# here. The first more important kind of backward compatibility is when doing
# something like
# ```python
# g = filter_closure_convert(f, some_array)
# g(some_int)
# ```
# (which indeed is the case that's exploding in the linked issue above). This worked
# before! We'd like it to keep working.
#
# The second, less important, is how we trace the current function into a jaxpr.
# Whether we trace with weak dtypes or not can give different results.
# In this case, we all survived for a long time without even noticing we were doing
# this... so probably we're actually happy with either choice.
# Regardless, stripping weak dtypes here again means that we obtain the same
# behaviour as before.
in_dynamic_struct = _strip_weak_dtype(jax.eval_shape(lambda: in_dynamic))
in_dynamic_struct = jtu.tree_flatten(in_dynamic_struct)
in_static = jtu.tree_flatten(in_static)
if isinstance(fn, types.FunctionType) and fn.__closure__ is None:
# In this case, it's not possible to have any closed-over tracers.
# Skip jaxpr tracing for efficiency.
closure_converted = _TrivialClosureConvert(fn, in_dynamic_struct, in_static)
else:
fn = cast(Callable[_P, _T], fn)
closed_jaxpr, out_dynamic_struct, out_static = filter_make_jaxpr(fn)(
*args,
**kwargs,
)
jaxpr = closed_jaxpr.jaxpr
consts = closed_jaxpr.consts
out_dynamic_struct = jtu.tree_flatten(out_dynamic_struct)
out_static = jtu.tree_flatten(out_static)
closure_converted = _ClosureConvert(
jaxpr, consts, in_dynamic_struct, out_dynamic_struct, in_static, out_static
)
closure_converted = cast(Callable[_P, _T], closure_converted)
return closure_converted
def _materialise_symbolic_zero(x, grad_x):
if grad_x is None and is_inexact_array(x):
return jnp.zeros_like(x)
else:
return grad_x
def _drop_nondiff(tangent, primal):
if isinstance(tangent, jax.custom_derivatives.SymbolicZero):
return None
elif jnp.issubdtype(jnp.result_type(primal), jnp.inexact):
# Work around JAX issue #16000
return tangent
else:
return None
| _ClosureConvert |
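A minimal sketch of `filter_closure_convert` outside any transform; per the docstring above, the example arguments must be JAX arrays to be treated as dynamic (Python floats pass through as static):

import jax.numpy as jnp
import equinox as eqx

z = jnp.array(2.0)
g = lambda a: z + a                           # closes over the array z
g2 = eqx.filter_closure_convert(g, jnp.array(0.0))
print(g2.consts)                              # z hoisted out as a constant leaf
print(g2(jnp.array(3.0)))                     # 5.0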
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/event/base.py | {
"start": 1744,
"end": 2303
} | class ____:
"""Serializable callable that re-generates an instance of
:class:`_Dispatch` given a particular :class:`.Events` subclass.
"""
def __call__(self, _instance_cls: Type[_ET]) -> _Dispatch[_ET]:
for cls in _instance_cls.__mro__:
if "dispatch" in cls.__dict__:
return cast(
"_Dispatch[_ET]", cls.__dict__["dispatch"].dispatch
)._for_class(_instance_cls)
else:
raise AttributeError("No class with a 'dispatch' member present.")
| _UnpickleDispatch |
python | dagster-io__dagster | python_modules/libraries/dagster-msteams/dagster_msteams/adaptive_card.py | {
"start": 25,
"end": 1344
} | class ____:
"""Class to contruct a MS Teams adaptive card for posting Dagster messages."""
def __init__(self, adaptive_card_version: str = "1.5"):
"""Constructs an adaptive card with the given version.
Args:
adaptive_card_version (str): The version of the adaptive card to use. Defaults to "1.5".
"""
self._body = []
self._adaptive_card_version = adaptive_card_version
@property
def payload(self) -> dict[str, Any]:
return {
"type": "message",
"attachments": [
{
"contentType": "application/vnd.microsoft.card.adaptive",
"contentUrl": None,
"content": {
"type": "AdaptiveCard",
"version": self._adaptive_card_version,
"body": self._body,
},
}
],
}
def _build_text_block(self, text: str) -> dict[str, Any]:
return {
"type": "TextBlock",
"text": text,
"wrap": True,
}
def add_attachment(self, text_message: str) -> None:
"""Appends a text message to the adaptive card."""
self._body.append(self._build_text_block(text_message))
| AdaptiveCard |
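A usage sketch for the card class above; posting the payload to a Teams incoming webhook is left as an assumption:

card = AdaptiveCard()
card.add_attachment("Dagster run completed successfully")
payload = card.payload  # dict shaped for the webhook POST body, per `payload` above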
python | pytorch__pytorch | torch/_dynamo/utils.py | {
"start": 32158,
"end": 34457
} | class ____(logging.Formatter):
"""Logging formatter that strips ANSI escape codes."""
def format(self, record):
msg = super().format(record)
return ANSI_ESCAPE_PATTERN.sub("", msg)
def add_file_handler() -> contextlib.ExitStack:
log_path = os.path.join(get_debug_dir(), "torchdynamo")
os.makedirs(log_path, exist_ok=True)
log_file_handler = logging.FileHandler(os.path.join(log_path, "debug.log"))
log_file_handler.setFormatter(StripAnsiFormatter("%(message)s"))
logger = logging.getLogger("torch._dynamo")
logger.addHandler(log_file_handler)
exitstack = contextlib.ExitStack()
exitstack.callback(lambda: logger.removeHandler(log_file_handler))
return exitstack
def setup_log_file() -> contextlib.ExitStack:
exitstack = contextlib.ExitStack()
if config.log_file_name is not None:
log_file_handler = logging.FileHandler(config.log_file_name)
for logger in torch._logging._internal.get_loggers():
logger.addHandler(log_file_handler)
exitstack.callback(lambda: logger.removeHandler(log_file_handler))
return exitstack
return exitstack
def gen_record_file_name(exc: Exception, code: CodeType) -> str:
return f"{get_debug_dir()}/error_recordings/\
{code.co_name}_{type(exc).__name__}_{code.co_firstlineno}.rec"
def write_record_to_file(filename: str, exec_record: ExecutionRecord) -> None:
try:
if os.path.exists(filename):
log.warning(
"Unable to write execution record %s; file already exists.", filename
)
else:
os.makedirs(os.path.dirname(filename), exist_ok=True)
with open(filename, "wb") as f:
exec_record.dump(f)
except Exception:
log.exception("Unable to write execution record %s", filename)
def count_calls(g: fx.Graph) -> int:
c = 0
for n in g.nodes:
if "call" in n.op:
c += 1
return c
def identity(x: T) -> T:
return x
def hashable(x: Any) -> bool:
try:
hash(x)
return True
except TypeError:
return False
# cannot hash writable memoryview object
except ValueError:
return False
def nothing(*args: Any, **kwargs: Any) -> None:
pass
| StripAnsiFormatter |
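A usage sketch attaching the formatter above to an ordinary handler:

import logging

handler = logging.StreamHandler()
handler.setFormatter(StripAnsiFormatter("%(levelname)s %(message)s"))
log = logging.getLogger("example")
log.addHandler(handler)
log.warning("\x1b[31mred\x1b[0m text")  # emitted as plain "red text"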
python | PrefectHQ__prefect | src/integrations/prefect-databricks/prefect_databricks/models/jobs.py | {
"start": 95841,
"end": 96493
} | class ____(BaseModel):
"""
See source code for the fields' description.
"""
model_config = ConfigDict(extra="allow", frozen=True)
code: Optional[TerminationCode] = Field(
None, description="Status code indicating why a cluster was terminated."
)
parameters: Optional[ParameterPair] = Field(
None,
description=(
"Object containing a set of parameters that provide information about why a"
" cluster was terminated."
),
)
type: Optional[TerminationType] = Field(
None, description="Reason indicating why a cluster was terminated."
)
| TerminationReason |
python | apache__airflow | providers/smtp/tests/unit/smtp/notifications/test_smtp.py | {
"start": 8571,
"end": 12737
} | class ____:
@pytest.fixture
def mock_smtp_client(self):
"""Create a mock SMTP object with async capabilities."""
mock_smtp = AsyncMock()
mock_smtp.asend_email_smtp = AsyncMock()
return mock_smtp
@pytest.fixture
def mock_smtp_hook(self, mock_smtp_client):
"""Set up the SMTP hook with async context manager."""
with mock.patch("airflow.providers.smtp.notifications.smtp.SmtpHook") as mock_hook:
mock_hook.return_value.__aenter__ = AsyncMock(return_value=mock_smtp_client)
yield mock_hook
@pytest.mark.asyncio
async def test_async_notifier(self, mock_smtp_hook, mock_smtp_client, create_dag_without_db):
notifier = SmtpNotifier(
**NOTIFIER_DEFAULT_PARAMS, context={"dag": create_dag_without_db(TEST_DAG_ID)}
)
await notifier.async_notify({"dag": create_dag_without_db(TEST_DAG_ID)})
mock_smtp_client.asend_email_smtp.assert_called_once_with(
smtp_conn_id=SMTP_CONN_ID,
**NOTIFIER_DEFAULT_PARAMS,
**DEFAULT_EMAIL_PARAMS,
)
async def test_async_notifier_with_notifier_class(
self, mock_smtp_hook, mock_smtp_client, create_dag_without_db
):
notifier = SmtpNotifier(
**NOTIFIER_DEFAULT_PARAMS, context={"dag": create_dag_without_db(TEST_DAG_ID)}
)
await notifier
mock_smtp_client.asend_email_smtp.assert_called_once_with(
smtp_conn_id=SMTP_CONN_ID,
**NOTIFIER_DEFAULT_PARAMS,
**DEFAULT_EMAIL_PARAMS,
)
async def test_async_notifier_templated(self, mock_smtp_hook, mock_smtp_client, create_dag_without_db):
notifier = SmtpNotifier(
from_email=TEMPLATED_SENDER.template,
to=TEMPLATED_RECEIVER.template,
subject=TEMPLATED_SUBJECT.template,
html_content=TEMPLATED_BODY.template,
context={"dag": create_dag_without_db(TEST_DAG_ID)},
)
await notifier
mock_smtp_client.asend_email_smtp.assert_called_once_with(
smtp_conn_id=SMTP_CONN_ID,
from_email=TEMPLATED_SENDER.rendered,
to=TEMPLATED_RECEIVER.rendered,
subject=TEMPLATED_SUBJECT.rendered,
html_content=TEMPLATED_BODY.rendered,
**DEFAULT_EMAIL_PARAMS,
)
async def test_async_notifier_with_defaults(
self, mock_smtp_hook, mock_smtp_client, create_dag_without_db, mock_task_instance
):
mock_smtp_client.subject_template = None
mock_smtp_client.html_content_template = None
mock_smtp_client.from_email = None
notifier = SmtpNotifier(
**NOTIFIER_DEFAULT_PARAMS, context={"dag": create_dag_without_db(TEST_DAG_ID)}
)
await notifier
mock_smtp_client.asend_email_smtp.assert_called_once_with(
smtp_conn_id=SMTP_CONN_ID,
**NOTIFIER_DEFAULT_PARAMS,
**DEFAULT_EMAIL_PARAMS,
)
async def test_async_notifier_with_nondefault_connection_extra(
self, mock_smtp_hook, mock_smtp_client, create_dag_without_db, mock_task_instance
):
with (
tempfile.NamedTemporaryFile(mode="wt", suffix=".txt") as f_subject,
tempfile.NamedTemporaryFile(mode="wt", suffix=".txt") as f_content,
):
f_subject.write(TEST_SUBJECT)
f_subject.flush()
f_content.write(TEST_BODY)
f_content.flush()
mock_smtp_client.from_email = TEST_SENDER
mock_smtp_client.subject_template = f_subject.name
mock_smtp_client.html_content_template = f_content.name
notifier = SmtpNotifier(to=TEST_RECEIVER, context={"dag": create_dag_without_db(TEST_DAG_ID)})
await notifier
mock_smtp_client.asend_email_smtp.assert_called_once_with(
smtp_conn_id=SMTP_CONN_ID,
from_email=TEST_SENDER,
to=TEST_RECEIVER,
subject=TEST_SUBJECT,
html_content=TEST_BODY,
**DEFAULT_EMAIL_PARAMS,
)
| TestSmtpNotifierAsync |
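A sketch of constructing the notifier exercised above, with fields taken from the tests; wiring it as a DAG callback is an assumption:

from airflow.providers.smtp.notifications.smtp import SmtpNotifier

notifier = SmtpNotifier(
    to="ops@example.com",
    from_email="airflow@example.com",
    subject="DAG {{ dag.dag_id }} failed",
    html_content="Task {{ ti.task_id }} failed.",
)
# e.g. DAG(..., on_failure_callback=notifier)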
python | getsentry__sentry | tests/snuba/api/endpoints/test_organization_events_cross_trace.py | {
"start": 113,
"end": 10696
} | class ____(OrganizationEventsEndpointTestBase):
def test_cross_trace_query_with_logs(self) -> None:
trace_id = uuid.uuid4().hex
excluded_trace_id = uuid.uuid4().hex
logs = [
self.create_ourlog(
{"body": "foo", "trace_id": trace_id},
timestamp=self.ten_mins_ago,
),
self.create_ourlog(
{"body": "bar", "trace_id": excluded_trace_id},
timestamp=self.nine_mins_ago,
),
]
self.store_ourlogs(logs)
self.store_spans(
[
# only this event should show up since we'll filtered to trace_id
self.create_span(
{
"description": "baz",
"sentry_tags": {"status": "success"},
"tags": {"foo": "five"},
"trace_id": trace_id,
},
start_ts=self.ten_mins_ago,
),
self.create_span(
{
"description": "baz",
"sentry_tags": {"status": "success"},
"tags": {"foo": "six"},
"trace_id": excluded_trace_id,
},
start_ts=self.ten_mins_ago,
),
],
is_eap=True,
)
response = self.do_request(
{
"field": ["tags[foo]", "count()"],
"query": "description:baz",
"orderby": "count()",
"project": self.project.id,
"dataset": "spans",
"logQuery": ["message:foo"],
}
)
assert response.status_code == 200, response.content
assert len(response.data["data"]) == 1
assert response.data["data"][0]["tags[foo]"] == "five"
def test_cross_trace_query_with_spans(self) -> None:
trace_id = uuid.uuid4().hex
excluded_trace_id = uuid.uuid4().hex
self.store_spans(
[
# only this event should show up since we'll filtered to trace_id
self.create_span(
{
"description": "baz",
"sentry_tags": {"status": "success"},
"tags": {"foo": "five"},
"trace_id": trace_id,
},
start_ts=self.ten_mins_ago,
),
self.create_span(
{
"description": "boo",
"sentry_tags": {"status": "success"},
"tags": {"foo": "six"},
"trace_id": trace_id,
},
start_ts=self.ten_mins_ago,
),
self.create_span(
{
"description": "baz",
"sentry_tags": {"status": "success"},
"tags": {"foo": "seven"},
"trace_id": excluded_trace_id,
},
start_ts=self.ten_mins_ago,
),
],
is_eap=True,
)
response = self.do_request(
{
"field": ["tags[foo]", "count()"],
"query": "description:baz",
"orderby": "count()",
"project": self.project.id,
"dataset": "spans",
"spanQuery": ["tags[foo]:six"],
}
)
assert response.status_code == 200, response.content
assert len(response.data["data"]) == 1
assert response.data["data"][0]["tags[foo]"] == "five"
def test_cross_trace_query_with_spans_and_logs(self) -> None:
trace_id = uuid.uuid4().hex
excluded_trace_id = uuid.uuid4().hex
# Both of these traces will be valid
logs = [
self.create_ourlog(
{"body": "foo", "trace_id": trace_id},
timestamp=self.ten_mins_ago,
),
self.create_ourlog(
{"body": "foo", "trace_id": excluded_trace_id},
timestamp=self.nine_mins_ago,
),
]
self.store_ourlogs(logs)
self.store_spans(
[
# only this event should show up since we'll filtered to trace_id
self.create_span(
{
"description": "baz",
"sentry_tags": {"status": "success"},
"tags": {"foo": "five"},
"trace_id": trace_id,
},
start_ts=self.ten_mins_ago,
),
# we should only get this trace
self.create_span(
{
"description": "boo",
"sentry_tags": {"status": "success"},
"tags": {"foo": "six"},
"trace_id": trace_id,
},
start_ts=self.ten_mins_ago,
),
self.create_span(
{
"description": "baz",
"sentry_tags": {"status": "success"},
"tags": {"foo": "seven"},
"trace_id": excluded_trace_id,
},
start_ts=self.ten_mins_ago,
),
],
is_eap=True,
)
response = self.do_request(
{
"field": ["tags[foo]", "count()"],
"query": "description:baz",
"orderby": "count()",
"project": self.project.id,
"dataset": "spans",
"spanQuery": ["tags[foo]:six"],
"logQuery": ["message:foo"],
}
)
assert response.status_code == 200, response.content
assert len(response.data["data"]) == 1
assert response.data["data"][0]["tags[foo]"] == "five"
def test_cross_trace_query_with_multiple_spans(self) -> None:
trace_id = uuid.uuid4().hex
excluded_trace_id = uuid.uuid4().hex
self.store_spans(
[
# only this event should show up since we'll filtered to trace_id
self.create_span(
{
"description": "baz",
"sentry_tags": {"status": "success"},
"tags": {"foo": "five"},
"trace_id": trace_id,
},
start_ts=self.ten_mins_ago,
),
# we should only get this trace
self.create_span(
{
"description": "boo",
"sentry_tags": {"status": "success"},
"tags": {"foo": "six"},
"trace_id": trace_id,
},
start_ts=self.ten_mins_ago,
),
self.create_span(
{
"description": "bam",
"sentry_tags": {"status": "success"},
"tags": {"foo": "seven"},
"trace_id": trace_id,
},
start_ts=self.ten_mins_ago,
),
self.create_span(
{
"description": "baz",
"sentry_tags": {"status": "success"},
"tags": {"foo": "eight"},
"trace_id": excluded_trace_id,
},
start_ts=self.ten_mins_ago,
),
],
is_eap=True,
)
response = self.do_request(
{
"field": ["tags[foo]", "count()"],
"query": "description:baz",
"orderby": "count()",
"project": self.project.id,
"dataset": "spans",
"spanQuery": ["tags[foo]:six", "tags[foo]:seven"],
}
)
assert response.status_code == 200, response.content
assert len(response.data["data"]) == 1
assert response.data["data"][0]["tags[foo]"] == "five"
def test_cross_trace_query_with_multiple_logs(self) -> None:
trace_id = uuid.uuid4().hex
excluded_trace_id = uuid.uuid4().hex
logs = [
self.create_ourlog(
{"body": "foo", "trace_id": trace_id},
timestamp=self.ten_mins_ago,
),
self.create_ourlog(
{"body": "faa", "trace_id": trace_id},
timestamp=self.ten_mins_ago,
),
self.create_ourlog(
{"body": "bar", "trace_id": excluded_trace_id},
timestamp=self.nine_mins_ago,
),
]
self.store_ourlogs(logs)
self.store_spans(
[
# only this event should show up since we'll filtered to trace_id
self.create_span(
{
"description": "baz",
"sentry_tags": {"status": "success"},
"tags": {"foo": "five"},
"trace_id": trace_id,
},
start_ts=self.ten_mins_ago,
),
self.create_span(
{
"description": "baz",
"sentry_tags": {"status": "success"},
"tags": {"foo": "eight"},
"trace_id": excluded_trace_id,
},
start_ts=self.ten_mins_ago,
),
],
is_eap=True,
)
response = self.do_request(
{
"field": ["tags[foo]", "count()"],
"query": "description:baz",
"orderby": "count()",
"project": self.project.id,
"dataset": "spans",
"logQuery": ["message:faa", "message:foo"],
}
)
assert response.status_code == 200, response.content
assert len(response.data["data"]) == 1
assert response.data["data"][0]["tags[foo]"] == "five"
| OrganizationEventsSpansEndpointTest |
python | conda__conda | conda/activate.py | {
"start": 39581,
"end": 40517
} | class ____(_Activator):
pathsep_join = ";".join if on_win else ":".join
sep = "\\" if on_win else "/"
path_conversion = staticmethod(_path_identity)
script_extension = ".ps1"
tempfile_extension = None # output to stdout
command_join = "\n"
needs_line_ending_fix = False
unset_var_tmpl = "$Env:%s = $null"
export_var_tmpl = '$Env:%s = "%s"'
path_var_tmpl = export_var_tmpl
set_var_tmpl = export_var_tmpl
run_script_tmpl = '. "%s"'
hook_source_path = Path(
CONDA_PACKAGE_ROOT,
"shell",
"condabin",
"conda-hook.ps1",
)
inline_hook_source = True
def _hook_preamble(self) -> str:
module_args = f"$CondaModuleArgs = @{{ChangePs1 = ${context.changeps1}}}"
return super()._hook_preamble() + module_args + self.command_join
def _hook_postamble(self) -> str:
return "Remove-Variable CondaModuleArgs"
| PowerShellActivator |
python | dagster-io__dagster | python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/sources.py | {
"start": 22647,
"end": 23190
} | class ____(GeneratedAirbyteSource):
@public
def __init__(self, name: str, api_key: str):
"""Airbyte Source for Greenhouse.
Documentation can be found at https://docs.airbyte.com/integrations/sources/greenhouse
Args:
name (str): The name of the destination.
api_key (str): Greenhouse API Key. See the docs for more information on how to generate this key.
"""
self.api_key = check.str_param(api_key, "api_key")
super().__init__("Greenhouse", name)
| GreenhouseSource |
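Instantiation follows directly from the signature above; the key is a placeholder:

source = GreenhouseSource(name="greenhouse_prod", api_key="<GREENHOUSE_API_KEY>")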
python | scikit-learn__scikit-learn | sklearn/svm/_classes.py | {
"start": 1327,
"end": 14429
} | class ____(LinearClassifierMixin, SparseCoefMixin, BaseEstimator):
"""Linear Support Vector Classification.
Similar to SVC with parameter kernel='linear', but implemented in terms of
liblinear rather than libsvm, so it has more flexibility in the choice of
penalties and loss functions and should scale better to large numbers of
samples.
The main differences between :class:`~sklearn.svm.LinearSVC` and
:class:`~sklearn.svm.SVC` lie in the loss function used by default, and in
the handling of intercept regularization between those two implementations.
This class supports both dense and sparse input and the multiclass support
is handled according to a one-vs-the-rest scheme.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
penalty : {'l1', 'l2'}, default='l2'
Specifies the norm used in the penalization. The 'l2'
penalty is the standard used in SVC. The 'l1' leads to ``coef_``
vectors that are sparse.
loss : {'hinge', 'squared_hinge'}, default='squared_hinge'
Specifies the loss function. 'hinge' is the standard SVM loss
(used e.g. by the SVC class) while 'squared_hinge' is the
square of the hinge loss. The combination of ``penalty='l1'``
and ``loss='hinge'`` is not supported.
dual : "auto" or bool, default="auto"
Select the algorithm to either solve the dual or primal
optimization problem. Prefer dual=False when n_samples > n_features.
`dual="auto"` will choose the value of the parameter automatically,
based on the values of `n_samples`, `n_features`, `loss`, `multi_class`
and `penalty`. If `n_samples` < `n_features` and optimizer supports
chosen `loss`, `multi_class` and `penalty`, then dual will be set to True,
otherwise it will be set to False.
.. versionchanged:: 1.3
The `"auto"` option is added in version 1.3 and will be the default
in version 1.5.
tol : float, default=1e-4
Tolerance for stopping criteria.
C : float, default=1.0
Regularization parameter. The strength of the regularization is
inversely proportional to C. Must be strictly positive.
For an intuitive visualization of the effects of scaling
the regularization parameter C, see
:ref:`sphx_glr_auto_examples_svm_plot_svm_scale_c.py`.
multi_class : {'ovr', 'crammer_singer'}, default='ovr'
Determines the multi-class strategy if `y` contains more than
two classes.
``"ovr"`` trains n_classes one-vs-rest classifiers, while
``"crammer_singer"`` optimizes a joint objective over all classes.
While `crammer_singer` is interesting from a theoretical perspective
as it is consistent, it is seldom used in practice as it rarely leads
to better accuracy and is more expensive to compute.
If ``"crammer_singer"`` is chosen, the options loss, penalty and dual
will be ignored.
fit_intercept : bool, default=True
Whether or not to fit an intercept. If set to True, the feature vector
is extended to include an intercept term: `[x_1, ..., x_n, 1]`, where
1 corresponds to the intercept. If set to False, no intercept will be
used in calculations (i.e. data is expected to be already centered).
intercept_scaling : float, default=1.0
When `fit_intercept` is True, the instance vector x becomes ``[x_1,
..., x_n, intercept_scaling]``, i.e. a "synthetic" feature with a
constant value equal to `intercept_scaling` is appended to the instance
vector. The intercept becomes intercept_scaling * synthetic feature
weight. Note that liblinear internally penalizes the intercept,
treating it like any other term in the feature vector. To reduce the
impact of the regularization on the intercept, the `intercept_scaling`
parameter can be set to a value greater than 1; the higher the value of
`intercept_scaling`, the lower the impact of regularization on it.
Then, the weights become `[w_x_1, ..., w_x_n,
w_intercept*intercept_scaling]`, where `w_x_1, ..., w_x_n` represent
the feature weights and the intercept weight is scaled by
`intercept_scaling`. This scaling allows the intercept term to have a
different regularization behavior compared to the other features.
class_weight : dict or 'balanced', default=None
Set the parameter C of class i to ``class_weight[i]*C`` for
SVC. If not given, all classes are supposed to have
weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``.
verbose : int, default=0
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in liblinear that, if enabled, may not work
properly in a multithreaded context.
random_state : int, RandomState instance or None, default=None
Controls the pseudo random number generation for shuffling the data for
the dual coordinate descent (if ``dual=True``). When ``dual=False`` the
underlying implementation of :class:`LinearSVC` is not random and
``random_state`` has no effect on the results.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
max_iter : int, default=1000
The maximum number of iterations to be run.
Attributes
----------
coef_ : ndarray of shape (1, n_features) if n_classes == 2 \
else (n_classes, n_features)
Weights assigned to the features (coefficients in the primal
problem).
``coef_`` is a readonly property derived from ``raw_coef_`` that
follows the internal memory layout of liblinear.
intercept_ : ndarray of shape (1,) if n_classes == 2 else (n_classes,)
Constants in decision function.
classes_ : ndarray of shape (n_classes,)
The unique classes labels.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
n_iter_ : int
Maximum number of iterations run across all classes.
See Also
--------
SVC : Implementation of Support Vector Machine classifier using libsvm:
the kernel can be non-linear but its SMO algorithm does not
scale to large number of samples as LinearSVC does.
Furthermore SVC multi-class mode is implemented using one
vs one scheme while LinearSVC uses one vs the rest. It is
possible to implement one vs the rest with SVC by using the
:class:`~sklearn.multiclass.OneVsRestClassifier` wrapper.
Finally SVC can fit dense data without memory copy if the input
is C-contiguous. Sparse data will still incur memory copy though.
sklearn.linear_model.SGDClassifier : SGDClassifier can optimize the same
cost function as LinearSVC
by adjusting the penalty and loss parameters. In addition it requires
less memory, allows incremental (online) learning, and implements
various loss functions and regularization regimes.
Notes
-----
The underlying C implementation uses a random number generator to
select features when fitting the model. It is thus not uncommon
to have slightly different results for the same input data. If
that happens, try with a smaller ``tol`` parameter.
The underlying implementation, liblinear, uses a sparse internal
representation for the data that will incur a memory copy.
Predict output may not match that of standalone liblinear in certain
cases. See :ref:`differences from liblinear <liblinear_differences>`
in the narrative documentation.
References
----------
`LIBLINEAR: A Library for Large Linear Classification
<https://www.csie.ntu.edu.tw/~cjlin/liblinear/>`__
Examples
--------
>>> from sklearn.svm import LinearSVC
>>> from sklearn.pipeline import make_pipeline
>>> from sklearn.preprocessing import StandardScaler
>>> from sklearn.datasets import make_classification
>>> X, y = make_classification(n_features=4, random_state=0)
>>> clf = make_pipeline(StandardScaler(),
... LinearSVC(random_state=0, tol=1e-5))
>>> clf.fit(X, y)
Pipeline(steps=[('standardscaler', StandardScaler()),
('linearsvc', LinearSVC(random_state=0, tol=1e-05))])
>>> print(clf.named_steps['linearsvc'].coef_)
[[0.141 0.526 0.679 0.493]]
>>> print(clf.named_steps['linearsvc'].intercept_)
[0.1693]
>>> print(clf.predict([[0, 0, 0, 0]]))
[1]
"""
_parameter_constraints: dict = {
"penalty": [StrOptions({"l1", "l2"})],
"loss": [StrOptions({"hinge", "squared_hinge"})],
"dual": ["boolean", StrOptions({"auto"})],
"tol": [Interval(Real, 0.0, None, closed="neither")],
"C": [Interval(Real, 0.0, None, closed="neither")],
"multi_class": [StrOptions({"ovr", "crammer_singer"})],
"fit_intercept": ["boolean"],
"intercept_scaling": [Interval(Real, 0, None, closed="neither")],
"class_weight": [None, dict, StrOptions({"balanced"})],
"verbose": ["verbose"],
"random_state": ["random_state"],
"max_iter": [Interval(Integral, 0, None, closed="left")],
}
def __init__(
self,
penalty="l2",
loss="squared_hinge",
*,
dual="auto",
tol=1e-4,
C=1.0,
multi_class="ovr",
fit_intercept=True,
intercept_scaling=1,
class_weight=None,
verbose=0,
random_state=None,
max_iter=1000,
):
self.dual = dual
self.tol = tol
self.C = C
self.multi_class = multi_class
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.class_weight = class_weight
self.verbose = verbose
self.random_state = random_state
self.max_iter = max_iter
self.penalty = penalty
self.loss = loss
@_fit_context(prefer_skip_nested_validation=True)
def fit(self, X, y, sample_weight=None):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like of shape (n_samples,)
Target vector relative to X.
sample_weight : array-like of shape (n_samples,), default=None
Array of weights that are assigned to individual
samples. If not provided,
then each sample is given unit weight.
.. versionadded:: 0.18
Returns
-------
self : object
An instance of the estimator.
"""
X, y = validate_data(
self,
X,
y,
accept_sparse="csr",
dtype=np.float64,
order="C",
accept_large_sparse=False,
)
check_classification_targets(y)
self.classes_ = np.unique(y)
_dual = _validate_dual_parameter(
self.dual, self.loss, self.penalty, self.multi_class, X
)
self.coef_, self.intercept_, n_iter_ = _fit_liblinear(
X,
y,
self.C,
self.fit_intercept,
self.intercept_scaling,
self.class_weight,
self.penalty,
_dual,
self.verbose,
self.max_iter,
self.tol,
self.random_state,
self.multi_class,
self.loss,
sample_weight=sample_weight,
)
# Backward compatibility: _fit_liblinear is used both by LinearSVC/R
# and LogisticRegression but LogisticRegression sets a structured
# `n_iter_` attribute with information about the underlying OvR fits
# while LinearSVC/R only reports the maximum value.
self.n_iter_ = n_iter_.max().item()
if self.multi_class == "crammer_singer" and len(self.classes_) == 2:
self.coef_ = (self.coef_[1] - self.coef_[0]).reshape(1, -1)
if self.fit_intercept:
intercept = self.intercept_[1] - self.intercept_[0]
self.intercept_ = np.array([intercept])
return self
def __sklearn_tags__(self):
tags = super().__sklearn_tags__()
tags.input_tags.sparse = True
return tags
| LinearSVC |
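The binary-class collapse at the end of `fit` (the `crammer_singer` branch) folds a two-row coefficient matrix into one decision row by differencing; a numpy sketch of just that step, with made-up numbers:

import numpy as np

coef = np.array([[1.0, -2.0, 0.5],    # class-0 row
                 [3.0,  1.0, 2.5]])   # class-1 row
intercept = np.array([0.25, -0.75])

binary_coef = (coef[1] - coef[0]).reshape(1, -1)            # [[2., 3., 2.]]
binary_intercept = np.array([intercept[1] - intercept[0]])  # [-1.]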
python | pytorch__pytorch | torch/_dynamo/guards.py | {
"start": 7290,
"end": 33276
} | class ____:
"""
A helper class that contains the root guard manager. An instance of this
class is stored in the Dynamo cache entry, so that the cache entry can
access the RootGuardManager stored in the "root" attribute and directly call
the check_nopybind from C++.
"""
def __init__(self, root: Optional[RootGuardManager] = None) -> None:
if root is None:
self.root = RootGuardManager()
else:
self.root = root
self.diff_guard_root: Optional[RootGuardManager] = None
self.closure_vars: Optional[dict[str, Any]] = None
self.args: Optional[list[str]] = None
self.code_parts: list[str] = []
self.verbose_code_parts: Optional[list[str]] = None
self.global_scope: Optional[dict[str, Any]] = None
self.guard_fail_fn: Optional[Callable[[GuardFail], None]] = None
self.cache_entry: Optional[CacheEntry] = None
self.extra_state: Optional[ExtraState] = None
self.id_matched_objs: dict[str, ReferenceType[object]] = {}
self.no_tensor_aliasing_sources: list[str] = []
self.printed_relational_guards: set[RelationalGuard] = set()
self.diff_guard_sources: OrderedSet[str] = OrderedSet()
@contextmanager
def _preserve_printed_relational_guards(self) -> Generator[None, None, None]:
self.printed_relational_guards = set()
try:
yield
finally:
self.printed_relational_guards = set()
# TODO: clarify what fn and attributes guard manager has to get the right things here
def collect_diff_guard_sources(self) -> OrderedSet[str]:
# At the time of finalize, we have only marked guard managers with
# TENSOR_MATCH guards as diff guard managers. So, we do a tree traversal
# and collect all the nodes in the tree (branches) that lead to tensor
# guards.
        # After a recompilation, some of the guard managers will have a fail_count >
# 0, so we collect them as well. Later on, we accumulate the diff guard
# sources for all the guard managers.
def visit_dict_manager(node: DictGuardManager) -> bool:
is_diff_guard_node = (
node.get_source() in self.diff_guard_sources or node.fail_count() > 0
)
for _idx, (key_mgr, val_mgr) in sorted(
node.get_key_value_managers().items()
):
is_diff_guard_node |= visit(key_mgr) | visit(val_mgr)
if is_diff_guard_node:
self.diff_guard_sources.add(node.get_source())
return is_diff_guard_node
def visit_manager(node: GuardManager) -> bool:
assert not isinstance(node, DictGuardManager)
is_diff_guard_node = (
node.get_source() in self.diff_guard_sources or node.fail_count() > 0
)
for child_mgr in node.get_child_managers():
is_diff_guard_node |= visit(child_mgr)
if is_diff_guard_node:
self.diff_guard_sources.add(node.get_source())
return is_diff_guard_node
def visit(node: GuardManager) -> bool:
if node is None:
return False
if isinstance(node, DictGuardManager):
return visit_dict_manager(node)
return visit_manager(node)
visit(self.root)
return self.diff_guard_sources
def finalize(self) -> None:
if config.use_recursive_dict_tags_for_guards and justknobs_check(
"pytorch/compiler:use_recursive_dict_tags_for_guards"
):
self.find_tag_safe_roots()
self.prepare_diff_guard_manager()
def prepare_diff_guard_manager(self) -> None:
self.collect_diff_guard_sources()
self.populate_diff_guard_manager()
def find_tag_safe_roots(self) -> None:
"""
Identify ``tag safe nodes`` and ``tag safe roots`` within a guard tree.
-----------------------------------------------------------------------
tag safe node
-----------------------------------------------------------------------
A *tag safe node* is a ``GuardManager`` whose guarded value satisfies one
of the following conditions:
1. Immutable value - The value is intrinsically immutable according to
``is_immutable_object``. Tensors are considered immutable. To ensure
that symbolic guards run, we also check that the GuardManager has no
accessors.
2. Nested tag safe dictionary - The value is a ``dict`` whose keys and
values are all tag safe nodes (checked recursively). Such dictionaries
allow entire nested structures to be skipped once their identity tag
matches.
3. Pure ``nn.Module`` - The value is an ``nn.Module`` whose sole
accessor is ``GetGenericDictGuardAccessor``—i.e., it only exposes its
``__dict__`` and nothing else that could mutate between runs.
For every tag safe node, verifying the identity/tag of just the top-level
dictionary is enough to guarantee the entire subtree is unchanged, enabling
a *fast-path* guard check.
-----------------------------------------------------------------------
tag safe root
-----------------------------------------------------------------------
A ``tag safe root`` is a tag safe node whose parent is not tag safe.
These boundary nodes mark the points where guard evaluation can safely
prune traversal: if a tag-safe root's dictionary tag matches, the entire
subtree beneath it is skipped.
One strong requirement for tag safe root is for the guarded object to
support weakref. Refer to more details in the Recursive dict tag
matching note. In short, we need to save the weakref of the object on
first invocation, and check if it is still valid in later iterations, to
apply recursive dict tag optimizations. `dict` objects do NOT support
weakref. Therefore, as of now, we only mark nn module related guard
managers as tag safe roots.
Algorithm
---------
        The search runs as a post-order traversal:
1. Visit leaves and classify them as tag safe or not.
2. Propagate tag-safety upward: a parent dictionary becomes tag safe only if
all of its children are already tag-safe.
3. Propagate tag-safe-rootness upward: if the whole subtree is tag safe,
the current node becomes the new tag safe root, otherwise propagate the
subtree tag safe roots.
4. Collect every tag safe node and, by inspecting parent tags, label the
subset that are tag safe roots.
"""
def check_tag_safety(
node: GuardManager, accepted_accessors: tuple[type[GuardAccessor], ...]
) -> bool:
accessors = node.get_accessors()
child_mgrs = node.get_child_managers()
return all(
isinstance(accessor, accepted_accessors) and mgr.is_tag_safe()
for accessor, mgr in zip(accessors, child_mgrs)
)
def visit_dict_manager(node: DictGuardManager) -> list[GuardManager]:
# Just recurse through the key and value dict managers and check if
# all of them are tag safe nodes.
assert issubclass(node.get_type_of_guarded_value(), dict)
tag_safe_roots = []
is_subtree_tag_safe = True
# Recurse to get the tag safe roots from subtree.
for _idx, (key_mgr, val_mgr) in sorted(
node.get_key_value_managers().items()
):
if key_mgr is not None:
visit(key_mgr)
if val_mgr is not None:
tag_safe_roots.extend(visit(val_mgr))
for key_mgr, val_mgr in node.get_key_value_managers().values():
if key_mgr:
is_subtree_tag_safe &= key_mgr.is_tag_safe()
if val_mgr:
is_subtree_tag_safe &= val_mgr.is_tag_safe()
if is_subtree_tag_safe:
node.mark_tag_safe()
return tag_safe_roots
def visit_manager(node: GuardManager) -> list[GuardManager]:
assert not isinstance(node, DictGuardManager)
# Collect the subtree tag safe roots
tag_safe_roots = []
for child_mgr in node.get_child_managers():
tag_safe_roots.extend(visit(child_mgr))
if node.is_guarded_value_immutable():
# If the node guards a tensor, mark it tag safe only if there
# are no accessors. Presence of accessors means presence of
# symbolic shape guards.
if issubclass(node.get_type_of_guarded_value(), torch.Tensor):
if node.has_no_accessors() and not node.has_object_aliasing_guard():
node.mark_tag_safe()
else:
node.mark_tag_safe()
elif issubclass(node.get_type_of_guarded_value(), dict):
accessors = node.get_accessors()
child_mgrs = node.get_child_managers()
is_subtree_tag_safe = all(
isinstance(accessor, DictGetItemGuardAccessor) and mgr.is_tag_safe()
for accessor, mgr in zip(accessors, child_mgrs)
)
if is_subtree_tag_safe:
node.mark_tag_safe()
elif issubclass(node.get_type_of_guarded_value(), torch.nn.Module):
is_subtree_tag_safe = check_tag_safety(
node, (GetGenericDictGuardAccessor, TypeGuardAccessor)
)
if is_subtree_tag_safe:
node.mark_tag_safe()
# Return the current node as tag safe root, discarding the
# subtree tag safe roots.
return [
node,
]
elif (
node.get_type_of_guarded_value()
in (
types.FunctionType,
types.MethodType,
staticmethod,
classmethod,
)
and config.assume_dunder_attributes_remain_unchanged
):
                # Assumption: callers will not reassign the attributes
                # func.__code__, func.__closure__, func.__defaults__, or func.__kwdefaults__.
# Mutating the objects those attributes point to is fine;
# rebinding the attribute itself is not.
# Example ─ allowed: foo.__defaults__[0].bar = 99
# forbidden: foo.__defaults__ = (3, 4)
is_subtree_tag_safe = check_tag_safety(
node,
(
CodeGuardAccessor,
ClosureGuardAccessor,
FuncDefaultsGuardAccessor,
FuncKwDefaultsGuardAccessor,
GetAttrGuardAccessor,
),
)
for accessor in node.get_accessors():
if isinstance(accessor, GetAttrGuardAccessor):
is_subtree_tag_safe &= (
accessor.get_attr_name() in dunder_attrs_assumed_constants
)
if is_subtree_tag_safe:
node.mark_tag_safe()
elif issubclass(node.get_type_of_guarded_value(), types.CellType):
is_subtree_tag_safe = check_tag_safety(node, (GetAttrGuardAccessor,))
is_subtree_tag_safe &= all(
isinstance(accessor, GetAttrGuardAccessor)
and accessor.get_attr_name() == "cell_contents"
for accessor in node.get_accessors()
)
if is_subtree_tag_safe:
node.mark_tag_safe()
elif (
issubclass(node.get_type_of_guarded_value(), tuple)
and node.get_source().endswith(dunder_attrs_assumed_constants)
and config.assume_dunder_attributes_remain_unchanged
):
# We trust tuples obtained from a function's __closure__ or
# __defaults__. Any *other* tuple-valued attribute can be
# silently replaced—for example:
#
# foo.bar = (1, 2) # original
            #   foo.bar = (3, 4)   # rebinding that our dict-tag optimization won't see
#
# Therefore only tuples from __closure__ / __defaults__ participate in the
# recursive-dict-tag optimization; all others are ignored.
is_subtree_tag_safe = check_tag_safety(
node, (TupleGetItemGuardAccessor,)
)
if is_subtree_tag_safe:
node.mark_tag_safe()
elif issubclass(node.get_type_of_guarded_value(), type):
is_subtree_tag_safe = check_tag_safety(
node, (TypeDictGuardAccessor, TypeMROGuardAccessor)
)
if is_subtree_tag_safe:
node.mark_tag_safe()
return tag_safe_roots
def visit(node: GuardManager) -> list[GuardManager]:
if node is None:
return []
if isinstance(node, DictGuardManager):
return visit_dict_manager(node)
return visit_manager(node)
tag_safe_roots = visit(self.root)
for node in tag_safe_roots:
if issubclass(node.get_type_of_guarded_value(), torch.nn.Module):
node.mark_tag_safe_root()
def populate_diff_guard_manager(self) -> None:
self.diff_guard_root = self.clone_with_chosen_sources(self.diff_guard_sources)
        # Ensure that the C++ side points to the updated diff guard manager.
        # When a new GuardManagerWrapper is created, it does not have a
        # cache_entry attribute, so it relies on the CacheEntry constructor to
        # set the diff_guard_root in C++. But once it is saved in the Dynamo
        # cache, the C++ side adds a cache_entry attribute. On recompiles, this
        # cache_entry is visible, so we update the C++ side to point to the
        # updated guard manager.
if self.cache_entry:
self.cache_entry.update_diff_guard_root_manager()
def clone_with_chosen_sources(
self, chosen_sources: OrderedSet[str]
) -> RootGuardManager:
def filter_fn(node_mgr: GuardManager) -> bool:
return node_mgr.get_source() in chosen_sources
return self.root.clone_manager(filter_fn)
def get_guard_lines(self, guard: LeafGuard) -> list[str]:
guard_name = guard.__class__.__name__
parts = guard.verbose_code_parts()
parts = [guard_name + ": " + part for part in parts]
return parts
def get_manager_line(
self, guard_manager: GuardManager, accessor_str: Optional[str] = None
) -> str:
source = guard_manager.get_source()
t = guard_manager.__class__.__name__
s = t + ": source=" + source
if accessor_str:
s += ", " + accessor_str
s += f", type={guard_manager.get_type_of_guarded_value()}"
s += f", tag_safe=({guard_manager.is_tag_safe()}, {guard_manager.is_tag_safe_root()})"
return s
def construct_dict_manager_string(
self, mgr: DictGuardManager, body: IndentedBufferWithPrefix
) -> None:
for idx, (key_mgr, val_mgr) in sorted(mgr.get_key_value_managers().items()):
body.writeline(f"KeyValueManager pair at index={idx}")
with body.indent():
if key_mgr:
body.writeline(f"KeyManager: {self.get_manager_line(key_mgr)}")
self.construct_manager_string(key_mgr, body)
if val_mgr:
body.writeline(f"ValueManager: {self.get_manager_line(val_mgr)}")
self.construct_manager_string(val_mgr, body)
def construct_manager_string(
self, mgr: GuardManager, body: IndentedBufferWithPrefix
) -> None:
with body.indent():
for guard in mgr.get_leaf_guards():
if isinstance(guard, RelationalGuard):
if guard not in self.printed_relational_guards:
self.printed_relational_guards.add(guard)
# pyrefly: ignore [bad-argument-type]
body.writelines(self.get_guard_lines(guard))
else:
body.writelines(
[
guard.__class__.__name__,
]
)
else:
body.writelines(self.get_guard_lines(guard))
# This works for both DictGuardManager and SubclassedDictGuardManager
if isinstance(mgr, DictGuardManager):
self.construct_dict_manager_string(mgr, body)
# General case of GuardManager/RootGuardManager
for accessor, child_mgr in zip(
mgr.get_accessors(), mgr.get_child_managers()
):
body.writeline(
self.get_manager_line(child_mgr, f"accessed_by={accessor.repr()}")
)
self.construct_manager_string(child_mgr, body)
def __str__(self) -> str:
with self._preserve_printed_relational_guards():
body = IndentedBufferWithPrefix()
body.tabwidth = 1
body.writeline("", skip_prefix=True)
body.writeline("TREE_GUARD_MANAGER:", skip_prefix=True)
body.writeline("RootGuardManager")
self.construct_manager_string(self.root, body)
if hasattr(self.root, "get_epilogue_lambda_guards"):
for guard in self.root.get_epilogue_lambda_guards():
body.writelines(self.get_guard_lines(guard))
return body.getvalue()
def check(self, x: Any) -> bool:
# Only needed for debugging purposes.
return self.root.check(x)
def check_verbose(self, x: Any) -> GuardDebugInfo:
# Only needed for debugging purposes.
return self.root.check_verbose(x)
def populate_code_parts_for_debugging(self) -> None:
# This should be called when the guard manager is fully populated
relational_guards_seen = set()
def get_code_parts(leaf_guard: LeafGuard) -> list[str]:
code_parts = []
for verbose_code_part in leaf_guard.verbose_code_parts():
code_part = verbose_code_part.split("#")[0].rstrip()
code_parts.append(code_part)
return code_parts
def visit(mgr: GuardManager) -> None:
nonlocal relational_guards_seen
for guard in mgr.get_leaf_guards():
if isinstance(guard, RelationalGuard):
if guard not in relational_guards_seen:
# pyrefly: ignore [bad-argument-type]
self.code_parts.extend(get_code_parts(guard))
relational_guards_seen.add(guard)
else:
self.code_parts.extend(get_code_parts(guard))
for child_mgr in mgr.get_child_managers():
visit(child_mgr)
visit(self.root)
def from_numpy(a: Any) -> torch.Tensor:
# If not numpy array, piggy back on e.g. tensor guards to check type
# Re-enable torch function since we disable it on leaf guards
# we need it to properly construct the tensor if a default device is set
with torch.overrides._enable_torch_function():
# pyrefly: ignore [missing-attribute]
return torch.as_tensor(a) if isinstance(a, (np.generic, np.ndarray)) else a
# For user stack printing
@functools.cache
def uninteresting_files() -> set[str]:
import torch._dynamo.external_utils
import torch._dynamo.polyfills
mods = [torch._dynamo.external_utils, torch._dynamo.polyfills]
from torch._dynamo.polyfills.loader import POLYFILLED_MODULES
# pyrefly: ignore [bad-argument-type]
mods.extend(POLYFILLED_MODULES)
return {inspect.getfile(m) for m in mods}
_CLOSURE_VARS: Optional[dict[str, object]] = None
def _get_closure_vars() -> dict[str, object]:
global _CLOSURE_VARS
if _CLOSURE_VARS is None:
_CLOSURE_VARS = {
"___check_type_id": check_type_id,
"___check_obj_id": check_obj_id,
"___odict_getitem": collections.OrderedDict.__getitem__,
"___key_to_id": key_to_id,
"___dict_version": dict_version,
"___dict_contains": lambda a, b: dict.__contains__(b, a),
"___tuple_iterator_len": tuple_iterator_len,
"___normalize_range_iter": normalize_range_iter,
"___tuple_iterator_getitem": tuple_iterator_getitem,
"___dataclass_fields": dataclass_fields,
"___namedtuple_fields": lambda x: x._fields,
"___get_torch_function_mode_stack_at": get_torch_function_mode_stack_at,
"___get_current_stream": get_current_stream,
"__math_isnan": math.isnan,
"__numpy_isnan": None if np is None else np.isnan,
"inf": float("inf"),
"__load_module": importlib.import_module,
"utils_device": torch.utils._device,
"device": torch.device,
"___from_numpy": from_numpy,
"___as_tensor": torch._as_tensor_fullprec,
"torch": torch,
"inspect": inspect,
}
return _CLOSURE_VARS
def _ast_unparse(node: ast.AST) -> str:
return ast.unparse(node).replace("\n", "")
strip_function_call = torch._C._dynamo.strip_function_call
def get_verbose_code_part(code_part: str, guard: Optional[Guard]) -> str:
extra = ""
if guard is not None:
if guard.user_stack:
for fs in reversed(guard.user_stack):
if fs.filename not in uninteresting_files():
extra = f" # {format_frame(fs, line=True)}"
if len(extra) > 1024:
# For fx graphs, the line can be very long in case of
# torch.stack ops, where many inputs are set to None
# after the operation. This increases the size of the
# guards log file. In such cases, do not print the line
# contents.
extra = f" # {format_frame(fs)}"
break
elif guard.stack:
summary = guard.stack.summary()
if len(summary) > 0:
extra = f" # {format_frame(summary[-1])}"
else:
extra = " # <unknown>"
return f"{code_part:<60}{extra}"
def get_verbose_code_parts(
code_parts: Union[str, list[str]],
guard: Optional[Guard],
recompile_hint: Optional[str] = None,
) -> list[str]:
if not isinstance(code_parts, list):
code_parts = [code_parts]
verbose_code_parts = [
get_verbose_code_part(code_part, guard) for code_part in code_parts
]
if recompile_hint:
verbose_code_parts = [
f"{part} (HINT: {recompile_hint})" for part in verbose_code_parts
]
return verbose_code_parts
def convert_int_to_concrete_values(dim: Any) -> Optional[int]:
if dim is None:
return None
if not is_symbolic(dim):
return dim
else:
assert isinstance(dim, torch.SymInt)
return dim.node.maybe_as_int()
def convert_to_concrete_values(size_or_stride: list[Any]) -> list[Optional[int]]:
return [convert_int_to_concrete_values(dim) for dim in size_or_stride]
def get_tensor_guard_code_part(
value: torch.Tensor,
name: str,
sizes: list[Optional[int]],
strides: list[Optional[int]],
pytype: type,
dispatch_keys: DispatchKeySet,
) -> str:
dispatch_key = (
dispatch_keys | torch._C._dispatch_tls_local_include_set()
) - torch._C._dispatch_tls_local_exclude_set()
dtype = value.dtype
device_index = value.device.index
requires_grad = value.requires_grad
guard_str = (
f"check_tensor({name}, {pytype.__qualname__}, {dispatch_key}, {dtype}, "
f"device={device_index}, requires_grad={requires_grad}, size={sizes}, stride={strides})"
)
return guard_str
def get_key_index(dct: dict[Any, Any], key: Any) -> int:
# Ensure that we call dict.keys and not value.keys (which can call
# overridden keys method). In the C++ guards, we relied on PyDict_Next
# to traverse the dictionary, which uses the internal data structure and
# does not call the overridden keys method.
return list(builtin_dict_keys(dct)).index(key)
def get_key_index_source(source: Any, index: Any) -> str:
return f"list(dict.keys({source}))[{index}]"
def raise_local_type_error(obj: Any) -> NoReturn:
raise TypeError(
f"Type {type(obj)} for object {obj} cannot be saved "
+ "into torch.compile() package since it's defined in local scope. "
+ "Please define the class at global scope (top level of a module)."
)
def should_optimize_getattr_on_nn_module(value: Any) -> bool:
# If inline_inbuilt_nn_modules flag is True, Dynamo has already traced
# through the __getattr__, and therefore it is always safe to optimize
# getattr on nn modules.
return isinstance(value, torch.nn.Module) and (
config.inline_inbuilt_nn_modules
or get_custom_getattr(value) is unpatched_nn_module_getattr
)
@dataclasses.dataclass(frozen=True)
| GuardManagerWrapper |
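`find_tag_safe_roots` above is post-order propagation plus a boundary pass; a toy model of the same idea on a plain tree (the `Node` class and `leaf_safe` flag are invented for illustration):

class Node:
    def __init__(self, leaf_safe, children=()):
        self.leaf_safe = leaf_safe
        self.children = list(children)
        self.tag_safe = False

def mark(node):
    # Post-order: a node is tag safe only if it is safe itself and every
    # child subtree is tag safe. A list comprehension (not all(...) on a
    # generator) keeps the traversal from short-circuiting past children.
    kids = [mark(child) for child in node.children]
    node.tag_safe = node.leaf_safe and all(kids)
    return node.tag_safe

def collect_roots(node, parent_safe, out):
    # A tag safe root is a tag safe node whose parent is not tag safe.
    if node.tag_safe and not parent_safe:
        out.append(node)
    for child in node.children:
        collect_roots(child, node.tag_safe, out)

tree = Node(True, [Node(True), Node(False, [Node(True)])])
mark(tree)
roots = []
collect_roots(tree, False, roots)  # the two safe leaves, not the unsafe branch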
python | rapidsai__cudf | python/cudf_polars/cudf_polars/dsl/ir.py | {
"start": 60535,
"end": 70738
} | class ____(IR):
"""Perform a groupby."""
__slots__ = (
"agg_requests",
"keys",
"maintain_order",
"zlice",
)
_non_child = (
"schema",
"keys",
"agg_requests",
"maintain_order",
"zlice",
)
keys: tuple[expr.NamedExpr, ...]
"""Grouping keys."""
agg_requests: tuple[expr.NamedExpr, ...]
"""Aggregation expressions."""
maintain_order: bool
"""Preserve order in groupby."""
zlice: Zlice | None
"""Optional slice to apply after grouping."""
def __init__(
self,
schema: Schema,
keys: Sequence[expr.NamedExpr],
agg_requests: Sequence[expr.NamedExpr],
maintain_order: bool, # noqa: FBT001
zlice: Zlice | None,
df: IR,
):
self.schema = schema
self.keys = tuple(keys)
for request in agg_requests:
expr = request.value
if isinstance(expr, unary.UnaryFunction) and expr.name == "value_counts":
raise NotImplementedError("value_counts is not supported in groupby")
if any(
isinstance(child, unary.UnaryFunction) and child.name == "value_counts"
for child in expr.children
):
raise NotImplementedError("value_counts is not supported in groupby")
self.agg_requests = tuple(agg_requests)
self.maintain_order = maintain_order
self.zlice = zlice
self.children = (df,)
self._non_child_args = (
schema,
self.keys,
self.agg_requests,
maintain_order,
self.zlice,
)
@classmethod
@log_do_evaluate
@nvtx_annotate_cudf_polars(message="GroupBy")
def do_evaluate(
cls,
schema: Schema,
keys_in: Sequence[expr.NamedExpr],
agg_requests: Sequence[expr.NamedExpr],
maintain_order: bool, # noqa: FBT001
zlice: Zlice | None,
df: DataFrame,
*,
context: IRExecutionContext,
) -> DataFrame:
"""Evaluate and return a dataframe."""
keys = broadcast(
*(k.evaluate(df) for k in keys_in),
target_length=df.num_rows,
stream=df.stream,
)
sorted = (
plc.types.Sorted.YES
if all(k.is_sorted for k in keys)
else plc.types.Sorted.NO
)
grouper = plc.groupby.GroupBy(
plc.Table([k.obj for k in keys]),
null_handling=plc.types.NullPolicy.INCLUDE,
keys_are_sorted=sorted,
column_order=[k.order for k in keys],
null_precedence=[k.null_order for k in keys],
)
requests = []
names = []
for request in agg_requests:
name = request.name
value = request.value
if isinstance(value, expr.Len):
                # A count aggregation; we need some column, so use a key column
col = keys[0].obj
elif isinstance(value, expr.Agg):
if value.name == "quantile":
child = value.children[0]
else:
(child,) = value.children
col = child.evaluate(df, context=ExecutionContext.GROUPBY).obj
else:
# Anything else, we pre-evaluate
col = value.evaluate(df, context=ExecutionContext.GROUPBY).obj
requests.append(plc.groupby.GroupByRequest(col, [value.agg_request]))
names.append(name)
group_keys, raw_tables = grouper.aggregate(requests, stream=df.stream)
results = [
Column(column, name=name, dtype=schema[name])
for name, column, request in zip(
names,
itertools.chain.from_iterable(t.columns() for t in raw_tables),
agg_requests,
strict=True,
)
]
result_keys = [
Column(grouped_key, name=key.name, dtype=key.dtype)
for key, grouped_key in zip(keys, group_keys.columns(), strict=True)
]
broadcasted = broadcast(*result_keys, *results, stream=df.stream)
# Handle order preservation of groups
if maintain_order and not sorted:
# The order we want
want = plc.stream_compaction.stable_distinct(
plc.Table([k.obj for k in keys]),
list(range(group_keys.num_columns())),
plc.stream_compaction.DuplicateKeepOption.KEEP_FIRST,
plc.types.NullEquality.EQUAL,
plc.types.NanEquality.ALL_EQUAL,
stream=df.stream,
)
# The order we have
have = plc.Table([key.obj for key in broadcasted[: len(keys)]])
# We know an inner join is OK because by construction
# want and have are permutations of each other.
left_order, right_order = plc.join.inner_join(
want, have, plc.types.NullEquality.EQUAL, stream=df.stream
)
# Now left_order is an arbitrary permutation of the ordering we
# want, and right_order is a matching permutation of the ordering
# we have. To get to the original ordering, we need
# left_order == iota(nrows), with right_order permuted
# appropriately. This can be obtained by sorting
# right_order by left_order.
(right_order,) = plc.sorting.sort_by_key(
plc.Table([right_order]),
plc.Table([left_order]),
[plc.types.Order.ASCENDING],
[plc.types.NullOrder.AFTER],
stream=df.stream,
).columns()
ordered_table = plc.copying.gather(
plc.Table([col.obj for col in broadcasted]),
right_order,
plc.copying.OutOfBoundsPolicy.DONT_CHECK,
stream=df.stream,
)
broadcasted = [
Column(reordered, name=old.name, dtype=old.dtype)
for reordered, old in zip(
ordered_table.columns(), broadcasted, strict=True
)
]
return DataFrame(broadcasted, stream=df.stream).slice(zlice)
def _strip_predicate_casts(node: expr.Expr) -> expr.Expr:
if isinstance(node, expr.Cast):
(child,) = node.children
child = _strip_predicate_casts(child)
src = child.dtype
dst = node.dtype
if plc.traits.is_fixed_point(src.plc_type) or plc.traits.is_fixed_point(
dst.plc_type
):
return child
if (
not POLARS_VERSION_LT_134
and isinstance(child, expr.ColRef)
and (
(
plc.traits.is_floating_point(src.plc_type)
and plc.traits.is_floating_point(dst.plc_type)
)
or (
plc.traits.is_integral(src.plc_type)
and plc.traits.is_integral(dst.plc_type)
and src.plc_type.id() == dst.plc_type.id()
)
)
):
return child
if not node.children:
return node
return node.reconstruct([_strip_predicate_casts(child) for child in node.children])
def _add_cast(
target: DataType,
side: expr.ColRef,
left_casts: dict[str, DataType],
right_casts: dict[str, DataType],
) -> None:
(col,) = side.children
assert isinstance(col, expr.Col)
casts = (
left_casts if side.table_ref == plc_expr.TableReference.LEFT else right_casts
)
casts[col.name] = target
def _align_decimal_binop_types(
left_expr: expr.ColRef,
right_expr: expr.ColRef,
left_casts: dict[str, DataType],
right_casts: dict[str, DataType],
) -> None:
left_type, right_type = left_expr.dtype, right_expr.dtype
if plc.traits.is_fixed_point(left_type.plc_type) and plc.traits.is_fixed_point(
right_type.plc_type
):
target = DataType.common_decimal_dtype(left_type, right_type)
if left_type.id() != target.id() or left_type.scale() != target.scale():
_add_cast(target, left_expr, left_casts, right_casts)
if right_type.id() != target.id() or right_type.scale() != target.scale():
_add_cast(target, right_expr, left_casts, right_casts)
elif (
plc.traits.is_fixed_point(left_type.plc_type)
and plc.traits.is_floating_point(right_type.plc_type)
) or (
plc.traits.is_fixed_point(right_type.plc_type)
and plc.traits.is_floating_point(left_type.plc_type)
):
is_decimal_left = plc.traits.is_fixed_point(left_type.plc_type)
decimal_expr, float_expr = (
(left_expr, right_expr) if is_decimal_left else (right_expr, left_expr)
)
_add_cast(decimal_expr.dtype, float_expr, left_casts, right_casts)
def _collect_decimal_binop_casts(
predicate: expr.Expr,
) -> tuple[dict[str, DataType], dict[str, DataType]]:
left_casts: dict[str, DataType] = {}
right_casts: dict[str, DataType] = {}
def _walk(node: expr.Expr) -> None:
if isinstance(node, expr.BinOp) and node.op in _BINOPS:
left_expr, right_expr = node.children
if isinstance(left_expr, expr.ColRef) and isinstance(
right_expr, expr.ColRef
):
_align_decimal_binop_types(
left_expr, right_expr, left_casts, right_casts
)
for child in node.children:
_walk(child)
_walk(predicate)
return left_casts, right_casts
def _apply_casts(df: DataFrame, casts: dict[str, DataType]) -> DataFrame:
if not casts:
return df
columns = []
for col in df.columns:
target = casts.get(col.name)
if target is None:
columns.append(Column(col.obj, dtype=col.dtype, name=col.name))
else:
casted = col.astype(target, stream=df.stream)
columns.append(Column(casted.obj, dtype=casted.dtype, name=col.name))
return DataFrame(columns, stream=df.stream)
| GroupBy |
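The order-restoration step in `do_evaluate` (sorting `right_order` by `left_order`) is a pure permutation trick; a numpy sketch with invented indices:

import numpy as np

left_order = np.array([2, 0, 1])   # arbitrary permutation of the order we want
right_order = np.array([7, 4, 5])  # matching positions in the table we have

# Sorting right_order by left_order yields gather indices that place the
# rows we have into the order we want: wanted row 0 is row 4 we have, etc.
gather = right_order[np.argsort(left_order)]  # -> array([4, 5, 7])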
python | pypa__warehouse | tests/common/db/sponsors.py | {
"start": 139,
"end": 701
} | class ____(WarehouseFactory):
class Meta:
model = Sponsor
name = factory.Faker("word")
service = factory.Faker("sentence")
activity_markdown = factory.Faker("sentence")
link_url = factory.Faker("uri")
color_logo_url = factory.Faker("image_url")
white_logo_url = factory.Faker("image_url")
is_active = True
footer = True
psf_sponsor = True
infra_sponsor = False
one_time = False
sidebar = True
origin = "manual"
level_name = ""
level_order = 0
slug = factory.Faker("slug")
| SponsorFactory |
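Factories like this build model rows from Faker providers, and call-site keywords override the declared defaults; a minimal standalone sketch (plain stand-in model; assumes `factory_boy` and its Faker integration are installed):

import factory

class Sponsor:  # stand-in model for illustration only
    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)

class DemoSponsorFactory(factory.Factory):
    class Meta:
        model = Sponsor

    name = factory.Faker("word")
    is_active = True

sponsor = DemoSponsorFactory(name="Acme")  # keyword beats the Faker declaration
assert sponsor.name == "Acme" and sponsor.is_active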
python | tornadoweb__tornado | demos/s3server/s3server.py | {
"start": 5322,
"end": 8252
} | class ____(BaseRequestHandler):
def get(self, bucket_name):
prefix = self.get_argument("prefix", "")
marker = self.get_argument("marker", "")
max_keys = int(self.get_argument("max-keys", 50000))
path = os.path.abspath(os.path.join(self.application.directory, bucket_name))
terse = int(self.get_argument("terse", 0))
if not path.startswith(self.application.directory) or not os.path.isdir(path):
raise web.HTTPError(404)
object_names = []
for root, dirs, files in os.walk(path):
for file_name in files:
object_names.append(os.path.join(root, file_name))
skip = len(path) + 1
for i in range(self.application.bucket_depth):
skip += 2 * (i + 1) + 1
object_names = [n[skip:] for n in object_names]
object_names.sort()
contents = []
start_pos = 0
if marker:
start_pos = bisect.bisect_right(object_names, marker, start_pos)
if prefix:
start_pos = bisect.bisect_left(object_names, prefix, start_pos)
truncated = False
for object_name in object_names[start_pos:]:
if not object_name.startswith(prefix):
break
if len(contents) >= max_keys:
truncated = True
break
object_path = self._object_path(bucket_name, object_name)
c = {"Key": object_name}
if not terse:
info = os.stat(object_path)
c.update(
{
"LastModified": datetime.datetime.utcfromtimestamp(
info.st_mtime
),
"Size": info.st_size,
}
)
contents.append(c)
marker = object_name
self.render_xml(
{
"ListBucketResult": {
"Name": bucket_name,
"Prefix": prefix,
"Marker": marker,
"MaxKeys": max_keys,
"IsTruncated": truncated,
"Contents": contents,
}
}
)
def put(self, bucket_name):
path = os.path.abspath(os.path.join(self.application.directory, bucket_name))
if not path.startswith(self.application.directory) or os.path.exists(path):
raise web.HTTPError(403)
os.makedirs(path)
self.finish()
def delete(self, bucket_name):
path = os.path.abspath(os.path.join(self.application.directory, bucket_name))
if not path.startswith(self.application.directory) or not os.path.isdir(path):
raise web.HTTPError(404)
if len(os.listdir(path)) > 0:
raise web.HTTPError(403)
os.rmdir(path)
self.set_status(204)
self.finish()
| BucketHandler |
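The `marker`/`prefix` pagination in `get` is plain `bisect` arithmetic over a sorted key list; a self-contained sketch of that scan:

import bisect

names = sorted(["a/1", "a/2", "b/1", "b/2", "b/3"])
marker, prefix, max_keys = "b/1", "b/", 2

start = 0
start = bisect.bisect_right(names, marker, start)  # resume strictly after marker
start = bisect.bisect_left(names, prefix, start)   # then jump ahead to the prefix
page, truncated = [], False
for name in names[start:]:
    if not name.startswith(prefix):
        break
    if len(page) >= max_keys:
        truncated = True
        break
    page.append(name)
# page == ["b/2", "b/3"], truncated is False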
python | python-openxml__python-docx | src/docx/oxml/simpletypes.py | {
"start": 4887,
"end": 5191
} | class ____(XsdString):
@classmethod
def validate(cls, value: str) -> None:
cls.validate_string(value)
valid_values = ("none", "left", "right", "all")
if value not in valid_values:
raise ValueError("must be one of %s, got '%s'" % (valid_values, value))
| ST_BrClear |
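The validator is a closed-set membership check layered on the string base type; a standalone sketch of the same contract (the base class here is a stand-in, invented for illustration):

class XsdStringSketch:
    @classmethod
    def validate_string(cls, value):
        if not isinstance(value, str):
            raise TypeError("value must be a string")

class BrClearSketch(XsdStringSketch):
    @classmethod
    def validate(cls, value):
        cls.validate_string(value)
        valid_values = ("none", "left", "right", "all")
        if value not in valid_values:
            raise ValueError("must be one of %s, got '%s'" % (valid_values, value))

BrClearSketch.validate("left")      # passes silently
# BrClearSketch.validate("center")  # would raise ValueError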
python | numba__numba | numba/cuda/tests/cudapy/test_extending.py | {
"start": 137,
"end": 2712
} | class ____:
"""
A half-open interval on the real number line.
"""
def __init__(self, lo, hi):
self.lo = lo
self.hi = hi
def __repr__(self):
return 'Interval(%f, %f)' % (self.lo, self.hi)
@property
def width(self):
return self.hi - self.lo
@njit
def interval_width(interval):
return interval.width
@njit
def sum_intervals(i, j):
return Interval(i.lo + j.lo, i.hi + j.hi)
if not config.ENABLE_CUDASIM:
from numba.core import cgutils
from numba.core.extending import (lower_builtin, make_attribute_wrapper,
models, register_model, type_callable,
typeof_impl)
from numba.core.typing.templates import AttributeTemplate
from numba.cuda.cudadecl import registry as cuda_registry
from numba.cuda.cudaimpl import lower_attr as cuda_lower_attr
class IntervalType(types.Type):
def __init__(self):
super().__init__(name='Interval')
interval_type = IntervalType()
@typeof_impl.register(Interval)
def typeof_interval(val, c):
return interval_type
@type_callable(Interval)
def type_interval(context):
def typer(lo, hi):
if isinstance(lo, types.Float) and isinstance(hi, types.Float):
return interval_type
return typer
@register_model(IntervalType)
class IntervalModel(models.StructModel):
def __init__(self, dmm, fe_type):
members = [
('lo', types.float64),
('hi', types.float64),
]
models.StructModel.__init__(self, dmm, fe_type, members)
make_attribute_wrapper(IntervalType, 'lo', 'lo')
make_attribute_wrapper(IntervalType, 'hi', 'hi')
@lower_builtin(Interval, types.Float, types.Float)
def impl_interval(context, builder, sig, args):
typ = sig.return_type
lo, hi = args
interval = cgutils.create_struct_proxy(typ)(context, builder)
interval.lo = lo
interval.hi = hi
return interval._getvalue()
@cuda_registry.register_attr
class Interval_attrs(AttributeTemplate):
key = IntervalType
def resolve_width(self, mod):
return types.float64
@cuda_lower_attr(IntervalType, 'width')
def cuda_Interval_width(context, builder, sig, arg):
lo = builder.extract_value(arg, 0)
hi = builder.extract_value(arg, 1)
return builder.fsub(hi, lo)
@skip_on_cudasim('Extensions not supported in the simulator')
| Interval |
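The extension registrations above teach the compiler the semantics the pure-Python class already has; a plain-Python check of those semantics (a stand-in class, no numba involved):

class IntervalSketch:  # mirrors the pure-Python Interval above
    def __init__(self, lo, hi):
        self.lo, self.hi = lo, hi

    @property
    def width(self):
        return self.hi - self.lo

a, b = IntervalSketch(1.0, 3.0), IntervalSketch(0.5, 1.5)
assert a.width == 2.0
c = IntervalSketch(a.lo + b.lo, a.hi + b.hi)  # what sum_intervals computes
assert (c.lo, c.hi) == (1.5, 4.5)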
python | google__jax | jax/_src/shard_map.py | {
"start": 29047,
"end": 50974
} | class ____(core.Primitive):
multiple_results = True
def bind(self, *args, **params):
return self._true_bind(*args, **params)
def bind_with_trace(self, trace, fun_and_args, params):
fun: lu.WrappedFun
fun, *args = fun_and_args
return trace.process_shard_map(shard_map_p, fun, args, **params)
def get_bind_params(self, params):
new_params = dict(params)
jaxpr = new_params.pop('jaxpr')
assert isinstance(jaxpr, core.Jaxpr)
subfun = lu.hashable_partial(
lu.wrap_init(core.eval_jaxpr, debug_info=jaxpr.debug_info), jaxpr, ())
axes = new_params.pop('out_specs')
new_params['out_specs_thunk'] = HashableFunction(lambda: axes, closure=axes)
return [subfun], new_params
shard_map_p = ShardMapPrimitive('shard_map')
# Staging
@util.cache(max_size=256, trace_context_in_key=False)
def _as_manual_mesh(mesh, manual_axes: frozenset) -> AbstractMesh:
return mesh.abstract_mesh.update_axis_types(
{n: AxisType.Manual for n in manual_axes})
def _extend_axis_env(mesh, manual_axes):
return core.extend_axis_env_nd([(k, v) for k, v in mesh.shape.items()
if k in manual_axes])
def _shard_map_staging(
trace: pe.DynamicJaxprTrace, prim: core.Primitive, f: lu.WrappedFun,
in_tracers: Sequence[Any], *, mesh: Mesh,
in_specs, out_specs_thunk, check_vma: bool, manual_axes: frozenset,
) -> Sequence[pe.DynamicJaxprTracer]:
source_info = source_info_util.current()
to_jaxpr_tracer = partial(trace.to_jaxpr_tracer, source_info=source_info)
in_tracers = map(to_jaxpr_tracer, in_tracers)
inner_mesh = _as_manual_mesh(mesh, manual_axes)
in_avals = [t.aval for t in in_tracers]
in_avals_ = map(partial(shard_aval, mesh, manual_axes, check_vma), in_specs,
in_avals)
with (_extend_axis_env(mesh, manual_axes), use_abstract_mesh(inner_mesh),
config._check_vma(check_vma)):
jaxpr, out_avals_, consts = pe.trace_to_jaxpr_dynamic(f, in_avals_)
_check_names(out_specs_thunk(), out_avals_)
if check_vma:
out_vma = [v.aval.vma for v in jaxpr.outvars]
_check_vmas(mesh, out_specs_thunk(), out_vma)
out_avals = map(_check_shapedarray, out_avals_)
out_avals = [_check_shapedarray(unshard_aval(mesh, check_vma, spec, aval))
for spec, aval in zip(out_specs_thunk(), out_avals)]
in_specs_staged = (P(),) * len(consts) + tuple(in_specs) # type: ignore
with (_extend_axis_env(mesh, manual_axes), use_abstract_mesh(inner_mesh),
config._check_vma(check_vma)):
jaxpr = pe.convert_constvars_jaxpr(jaxpr)
params = dict(mesh=mesh, in_specs=in_specs_staged,
out_specs=tuple(out_specs_thunk()), jaxpr=jaxpr,
check_vma=check_vma, manual_axes=manual_axes)
effs = core.filter_named_axis_effects(jaxpr.effects, mesh.axis_names)
const_tracers = map(to_jaxpr_tracer, consts)
return trace.emit_eqn([*const_tracers, *in_tracers], out_avals, prim, params,
effs, source_info)
pe.DynamicJaxprTrace.process_shard_map = _shard_map_staging
# TODO add underscore version, for direct-linearize to consume
def _spec_to_names(spec: PartitionSpec):
return {i: names if isinstance(names, tuple) else (names,)
for i, names in enumerate(spec) if names is not None}
def _check_shapedarray(aval: core.AbstractValue) -> core.ShapedArray:
assert isinstance(aval, core.ShapedArray)
return aval
def _shard_shaped_array(mesh: Mesh, manual_axes: frozenset, check_vma,
spec, aval: core.AbstractValue) -> core.AbstractValue:
assert isinstance(aval, core.ShapedArray)
if spec.unreduced != aval.sharding.spec.unreduced:
raise ValueError(
f"in_specs containing unreduced {spec} passed to shard_map should be"
" equal to the unreduced present on the in_aval"
f" {aval.str_short(True)}")
if spec.reduced != aval.sharding.spec.reduced:
raise ValueError(
f"in_specs containing reduced {spec} passed to shard_map should be"
f" equal to the reduced present on the in_aval {aval.str_short(True)}")
names = _spec_to_names(spec)
new_shape = tuple(sz // prod(mesh.shape[n] for n in names.get(i, ()))
for i, sz in enumerate(aval.shape))
manual_mesh = _as_manual_mesh(mesh, manual_axes)
new_sharding = aval.sharding.update(mesh=manual_mesh)
vma = _spec_to_vma(spec) if check_vma else frozenset()
vma = vma | aval.vma
return aval.update(shape=new_shape, sharding=new_sharding, vma=vma)
core.shard_aval_handlers[core.ShapedArray] = _shard_shaped_array
def _unshard_shaped_array(mesh: Mesh, check_vma, spec, aval: core.AbstractValue
) -> core.AbstractValue:
assert isinstance(aval, core.ShapedArray)
if spec.unreduced != aval.sharding.spec.unreduced:
raise ValueError(
"out_specs passed to shard_map should be equal to the unreduced"
f" present on the out_aval. Got out_specs={spec} and"
f" out_aval={aval.str_short(True)}")
if spec.reduced != aval.sharding.spec.reduced:
raise ValueError(
"out_specs passed to shard_map should be equal to the reduced present"
f" on the out_aval. Got out_specs={spec} and"
f" out_aval={aval.str_short(True)}")
names = _spec_to_names(spec)
new_shape = tuple(sz * prod(mesh.shape[n] for n in names.get(i, ()))
for i, sz in enumerate(aval.shape))
names_spec = spec._normalized_spec_for_aval(aval.ndim)
if aval.ndim == 0:
out_spec = P()
else:
out_spec = [] # type: ignore
for name_s, aval_s in zip(names_spec, aval.sharding.spec):
if name_s and not aval_s:
out_spec.append(name_s)
elif aval_s and not name_s:
out_spec.append(aval_s)
elif not name_s and not aval_s:
out_spec.append(None)
else:
assert name_s and aval_s
name_s = name_s if isinstance(name_s, tuple) else (name_s,)
aval_s = aval_s if isinstance(aval_s, tuple) else (aval_s,)
out_spec.append(name_s + aval_s)
out_spec = PartitionSpec(*out_spec, unreduced=spec.unreduced,
reduced=spec.reduced)
new_mesh = (mesh.abstract_mesh if get_abstract_mesh().empty else
get_abstract_mesh())
new_sharding = NamedSharding(new_mesh, out_spec)
manual_axes = set(new_mesh.manual_axes)
vma = (frozenset(v for v in aval.vma if v in manual_axes)
if check_vma else frozenset())
return aval.update(shape=new_shape, sharding=new_sharding, vma=vma)
core.unshard_aval_handlers[core.ShapedArray] = _unshard_shaped_array
# Type-checking
def _shard_map_typecheck(_, *in_atoms, jaxpr, mesh, in_specs, out_specs,
check_vma, manual_axes):
# TODO(mattjj,parkers): check auto
for v, x, in_spec in zip(jaxpr.invars, in_atoms, in_specs):
sharded_aval = shard_aval(mesh, manual_axes, check_vma, in_spec, x.aval)
if not core.typecompat(v.aval, sharded_aval):
raise core.JaxprTypeError("shard_map argument avals not compatible with "
"jaxpr binder avals and in_specs")
with _extend_axis_env(mesh, manual_axes), config._check_vma(check_vma):
core.check_jaxpr(jaxpr)
if check_vma:
out_vma = [v.aval.vma for v in jaxpr.outvars]
for vma, out_spec in zip(out_vma, out_specs):
if not _valid_repeats(mesh, vma, out_spec):
raise core.JaxprTypeError(
"shard_map can't prove output is sufficiently replicated")
out_avals_sharded = [x.aval for x in jaxpr.outvars]
out_avals = map(partial(unshard_aval, mesh, check_vma), out_specs,
out_avals_sharded)
effs = core.filter_named_axis_effects(jaxpr.effects, mesh.axis_names)
return out_avals, effs
core.custom_typechecks[shard_map_p] = _shard_map_typecheck
def _valid_repeats(mesh: Mesh, vma: Set[AxisName], spec) -> bool:
um = set(_unmentioned(mesh, spec)) - set(mesh.manual_axes)
if any(u in vma for u in um):
return False
return True
# Lowering
def _shardy_shard_map_sharding(
ctx: mlir.LoweringRuleContext, mesh, manual_axes, spec, aval_in
) -> sharding_impls.SdyArray:
ns = _make_scoped_manual_sharding(ctx, mesh, spec)
if dtypes.issubdtype(aval_in.dtype, dtypes.extended):
ns = sharding_impls.physical_sharding(aval_in, ns)
aval_in = core.physical_aval(aval_in)
sdy_sharding = ns._to_sdy_sharding(aval_in.ndim)
if len(manual_axes) < len(mesh.axis_names):
for dim_sharding in sdy_sharding.dim_shardings:
dim_sharding.is_open = True
return sdy_sharding
def _get_token_sharding(
ctx: mlir.LoweringRuleContext, mesh
) -> ir.Attribute:
ns = _make_scoped_manual_sharding(ctx, mesh, P())
return ns._to_sdy_sharding(0)
def _get_spmdaxis_ctx_mesh(mesh):
if isinstance(mesh, AbstractMesh):
concrete_mesh = get_concrete_mesh()
return concrete_mesh if not concrete_mesh.empty else mesh
return mesh
def _shard_map_lowering_shardy(
ctx: mlir.LoweringRuleContext, in_nodes,
jaxpr: core.Jaxpr, mesh, in_specs, out_specs, manual_axes, check_vma):
axis_ctx = ctx.module_context.axis_context
in_avals_ = [v.aval for v in jaxpr.invars]
if isinstance(axis_ctx, sharding_impls.SPMDAxisContext):
# Nested `ManualComputationOp`s must only refer to the new manual axes, not
# all existing ones. Grab the newly-added manual axes.
shardy_manual_axes = manual_axes - axis_ctx.manual_axes
else:
shardy_manual_axes = manual_axes
new_axis_context = sharding_impls.SPMDAxisContext(
_get_spmdaxis_ctx_mesh(mesh), manual_axes)
sub_ctx = ctx.module_context.replace(axis_context=new_axis_context)
tokens = [ctx.tokens_in.get(eff) for eff in ctx.tokens_in.effects()]
num_tokens = len(tokens)
manual_axes = order_wrt_mesh(mesh, shardy_manual_axes)
if prod([mesh.shape[a] for a in manual_axes]) == 1:
# No need for a `ManualComputationOp` if all manual axes are size 1.
with _extend_axis_env(mesh, manual_axes), config._check_vma(check_vma):
out_nodes, tokens_out = mlir.jaxpr_subcomp(
sub_ctx, jaxpr, ctx.name_stack,
mlir.TokenSet(zip(ctx.tokens_in.effects(), tokens)),
(), *in_nodes,
dim_var_values=ctx.dim_var_values,
const_lowering=ctx.const_lowering)
ctx.set_tokens_out(tokens_out)
return out_nodes
in_shardings = list(
map(partial(_shardy_shard_map_sharding, ctx, mesh, manual_axes),
in_specs, ctx.avals_in))
const_args_and_avals = core.jaxpr_const_args(jaxpr)
const_args, const_avals = util.unzip2(const_args_and_avals)
num_const_args = len(const_args)
const_arg_values = tuple(
mlir.ir_constant(c, const_lowering=ctx.const_lowering, aval=aval)
for c, aval in const_args_and_avals)
# TODO(necula,yashkatariya): how to construct consts shardy shardings from
# consts that can be ndarray or jax.Array?
const_args_shardings = [
_shardy_shard_map_sharding(ctx, mesh, manual_axes, P(), core.typeof(c))
for c in const_args]
num_dim_vars = len(ctx.dim_var_values)
in_shardings = (
[_get_token_sharding(ctx, mesh)] * (num_tokens + num_dim_vars) +
const_args_shardings + in_shardings)
in_shardings = sharding_impls.SdyArrayList(in_shardings).build()
out_shardings = list(
map(partial(_shardy_shard_map_sharding, ctx, mesh, manual_axes),
out_specs, ctx.avals_out))
out_shardings = [
_get_token_sharding(ctx, mesh)] * num_tokens + out_shardings
out_shardings = sharding_impls.SdyArrayList(out_shardings).build()
output_types = ([hlo.TokenType.get()] * num_tokens +
list(map(mlir.aval_to_ir_type, ctx.avals_out)))
args = (*ctx.dim_var_values, *tokens, *const_arg_values, *in_nodes)
manual_computation_op = sdy.ManualComputationOp(
output_types, mlir.flatten_ir_values(args), in_shardings, out_shardings,
sdy.ManualAxesAttr.get(
ir.ArrayAttr.get([ir.StringAttr.get(i) for i in manual_axes])))
dim_var_types = [mlir.aval_to_ir_type(
core.ShapedArray((), dtypes.default_int_dtype()))] * num_dim_vars
token_types = [hlo.TokenType.get()] * num_tokens
const_arg_types = map(mlir.aval_to_ir_type, const_avals)
in_types = map(mlir.aval_to_ir_type, in_avals_)
block = ir.Block.create_at_start(
manual_computation_op.body,
(*dim_var_types, *token_types, *const_arg_types, *in_types))
with (ir.InsertionPoint(block), _extend_axis_env(mesh, manual_axes),
config._check_vma(check_vma)):
dim_var_values, token_arg_values, const_arg_values, in_args = util.split_list( # type: ignore
block.arguments, [num_dim_vars, num_tokens, num_const_args])
block_const_lowering = {
(id(c), aval): ca
for c, aval, ca in zip(const_args, const_avals, const_arg_values)
}
out_nodes_, tokens_out = mlir.jaxpr_subcomp(
sub_ctx, jaxpr, ctx.name_stack,
mlir.TokenSet(zip(ctx.tokens_in.effects(), token_arg_values)),
(), *in_args,
dim_var_values=dim_var_values,
const_lowering=block_const_lowering)
sdy.ReturnOp([ir.Value(x) for x in (*[v for _, v in tokens_out.items()],
*out_nodes_)])
num_tokens = len(tokens_out.effects())
tokens_out = tokens_out.update_tokens(mlir.TokenSet(zip(
ctx.tokens_in.effects(), manual_computation_op.results[:num_tokens])))
ctx.set_tokens_out(tokens_out)
return manual_computation_op.results[num_tokens:]
def _shard_map_lowering(ctx: mlir.LoweringRuleContext, *in_nodes,
jaxpr: core.Jaxpr, mesh, in_specs, out_specs,
check_vma, manual_axes):
if config.use_shardy_partitioner.value:
return _shard_map_lowering_shardy(
ctx, in_nodes, jaxpr, mesh, in_specs, out_specs, manual_axes, check_vma)
in_avals_ = [v.aval for v in jaxpr.invars]
out_avals_ = [x.aval for x in jaxpr.outvars]
in_nodes_ = map(partial(_xla_shard, ctx, mesh, manual_axes), in_specs,
ctx.avals_in, in_avals_, in_nodes)
new_axis_context = sharding_impls.SPMDAxisContext(
_get_spmdaxis_ctx_mesh(mesh), manual_axes)
sub_ctx = ctx.module_context.replace(axis_context=new_axis_context)
with _extend_axis_env(mesh, manual_axes), config._check_vma(check_vma):
out_nodes_, tokens_out = mlir.call_lowering(
"shmap_body", pe.close_jaxpr(jaxpr), None, sub_ctx, in_avals_,
out_avals_, ctx.tokens_in, *in_nodes_,
dim_var_values=ctx.dim_var_values,
const_lowering=ctx.const_lowering,
arg_names=map(_pspec_mhlo_attrs, in_specs, in_avals_),
result_names=map(_pspec_mhlo_attrs, out_specs, out_avals_))
ctx.set_tokens_out(tokens_out)
return map(partial(_xla_unshard, ctx, mesh, manual_axes), out_specs,
out_avals_, ctx.avals_out, out_nodes_)
mlir.register_lowering(shard_map_p, _shard_map_lowering)
def _make_scoped_manual_sharding(ctx, mesh, spec):
axis_ctx = ctx.module_context.axis_context
mesh = mesh.abstract_mesh
if isinstance(axis_ctx, sharding_impls.SPMDAxisContext):
mesh = mesh.update_axis_types(
{a: AxisType.Manual for a in axis_ctx.manual_axes})
return NamedSharding(mesh, spec)
def _xla_shard(ctx: mlir.LoweringRuleContext, mesh, manual_axes, spec,
aval_in, aval_out, x):
if prod([size for n, size in mesh.shape.items() if n in manual_axes]) == 1:
return x
ns = _make_scoped_manual_sharding(ctx, mesh, spec)
if dtypes.issubdtype(aval_in.dtype, dtypes.extended):
ns = sharding_impls.physical_sharding(aval_in, ns)
aval_in = core.physical_aval(aval_in)
shard_proto = ns._to_xla_hlo_sharding(aval_in.ndim).to_proto()
unspecified = (set(range(aval_in.ndim))
if len(manual_axes) < len(mesh.axis_names) else set())
sx = mlir.wrap_with_sharding_op(ctx, x, aval_in, shard_proto,
unspecified_dims=unspecified)
manual_proto = pxla.manual_proto(
aval_in, manual_axes | set(mesh.manual_axes), mesh)
return mlir.wrap_with_full_to_shard_op(ctx, sx, aval_out, manual_proto,
unspecified)
def _xla_unshard(ctx: mlir.LoweringRuleContext, mesh, manual_axes, spec,
aval_in, aval_out, x):
if prod([size for n, size in mesh.shape.items() if n in manual_axes]) == 1:
return x
ns = _make_scoped_manual_sharding(ctx, mesh, spec)
if dtypes.issubdtype(aval_out.dtype, dtypes.extended):
ns = sharding_impls.physical_sharding(aval_out, ns)
aval_out = core.physical_aval(aval_out)
unspecified = (set(range(aval_in.ndim))
if len(manual_axes) < len(mesh.axis_names) else set())
if dtypes.issubdtype(aval_in.dtype, dtypes.extended):
aval_in = core.physical_aval(aval_in)
manual_proto = pxla.manual_proto(
aval_in, manual_axes | set(mesh.manual_axes), mesh)
sx = mlir.wrap_with_sharding_op(ctx, x, aval_in, manual_proto,
unspecified_dims=unspecified)
shard_proto = ns._to_xla_hlo_sharding(aval_out.ndim).to_proto()
return mlir.wrap_with_shard_to_full_op(ctx, sx, aval_out, shard_proto,
unspecified)
def _pspec_mhlo_attrs(spec, aval: core.AbstractValue) -> str:
if isinstance(aval, core.ShapedArray):
names = _spec_to_names(spec)
return str(map(names.get, range(aval.ndim)))
return ''
# Eager evaluation
def get_mesh_from_args(args_flat, mesh):
for a in args_flat:
if hasattr(a, 'sharding') and isinstance(a.sharding, NamedSharding):
if a.sharding.mesh.shape_tuple != mesh.shape_tuple:
aval = core.shaped_abstractify(a)
raise ValueError(
f"Mesh shape of the input {a.sharding.mesh.shape_tuple} does not"
" match the mesh shape passed to shard_map "
f" {mesh.shape_tuple} for shape {aval.str_short()}")
mesh = a.sharding.mesh
if isinstance(mesh, AbstractMesh):
raise ValueError(
"Please pass `jax.Array`s with a `NamedSharding` as input to"
" `shard_map` when passing `AbstractMesh` to the mesh argument.")
assert isinstance(mesh, Mesh)
return mesh
def _vma_to_spec(mesh, vma):
return P(order_wrt_mesh(mesh, vma))
def _spec_to_vma(spec):
return frozenset(p for s in spec if s is not None
for p in (s if isinstance(s, tuple) else (s,)))
def _shard_map_impl(trace, prim, fun, args, *, mesh, in_specs, out_specs_thunk,
check_vma, manual_axes):
del prim
if isinstance(mesh, AbstractMesh):
concrete_mesh = get_concrete_mesh()
mesh = concrete_mesh if not concrete_mesh.empty else mesh
mesh = get_mesh_from_args(args, mesh)
cur_mesh = get_abstract_mesh()
args = map(partial(_unmatch_spec, mesh, check_vma, cur_mesh, manual_axes),
in_specs, args)
in_vma = map(_spec_to_vma, in_specs)
outs, out_vma = _run_shmap(fun, mesh, manual_axes, args, in_vma, check_vma)
out_avals = [core.mapped_aval(x.shape[0], 0, core.get_aval(x)) for x in outs]
_check_names(out_specs_thunk(), out_avals) # pytype: disable=wrong-arg-types
if check_vma:
_check_vmas(mesh, out_specs_thunk(), out_vma)
src_pspecs = tuple(_vma_to_spec(mesh, r) for r in out_vma)
else:
src_pspecs = tuple(P(order_wrt_mesh(mesh, manual_axes))
for _ in range(len(out_vma)))
dst_pspecs = out_specs_thunk()
return map(partial(_match_spec, mesh, check_vma, manual_axes),
src_pspecs, dst_pspecs, outs)
core.EvalTrace.process_shard_map = _shard_map_impl
def _run_shmap(f, mesh, manual_axes, args, vmas, check_vma):
assert not mesh.manual_axes
trace = ShardMapTrace(mesh, manual_axes, check_vma)
in_tracers = map(partial(ShardMapTracer, trace), vmas, args)
inner_mesh = _as_manual_mesh(mesh, manual_axes)
with (core.set_current_trace(trace), _extend_axis_env(mesh, manual_axes),
use_abstract_mesh(inner_mesh), config._check_vma(check_vma)):
ans = f.call_wrapped(*in_tracers)
outs, out_vma = unzip2(map(trace.to_val_vma_pair, ans))
return outs, out_vma
def _unmatch_spec2(mesh, prev_manual, spec, x) -> JaxType:
with (core.eval_context(), api.disable_jit(False),
use_abstract_mesh(mesh.abstract_mesh)):
return api.jit(HashablePartial(_unmatch2, mesh, prev_manual, spec))(x)
def _unmatch2(mesh, prev_manual, spec, x):
src = P(order_wrt_mesh(mesh, prev_manual), *spec)
newly_manual = _spec_to_vma(spec)
dst = P(order_wrt_mesh(mesh, prev_manual | newly_manual))
return shard_map(lambda x: x, in_specs=src, out_specs=dst)(x)
def _match_spec2(mesh, prev_manual, spec, x) -> JaxType:
with (core.eval_context(), api.disable_jit(False),
use_abstract_mesh(mesh.abstract_mesh)):
return api.jit(HashablePartial(_match2, mesh, prev_manual, spec))(x)
def _match2(mesh, prev_manual, spec, x):
newly_manual = _spec_to_vma(spec)
src = P(order_wrt_mesh(mesh, prev_manual | newly_manual))
dst = P(order_wrt_mesh(mesh, prev_manual), *spec)
return shard_map(lambda x: x, in_specs=src, out_specs=dst)(x)
def _unmatch_spec(mesh: Mesh, check_vma, context_mesh, manual_axes, in_spec,
x: JaxType) -> JaxType:
with (core.eval_context(), api.disable_jit(False),
use_abstract_mesh(context_mesh)):
return api.jit(HashablePartial(_unmatch, mesh, check_vma, in_spec,
manual_axes))(x)
def _unmatch(mesh, check_vma, in_spec, manual_axes, x):
if check_vma:
used_axes = _spec_to_vma(in_spec)
dst = P(order_wrt_mesh(mesh, used_axes))
else:
dst = P(mesh.axis_names)
check_vma = False
return shard_map(_add_singleton, mesh=mesh, in_specs=(in_spec,),
out_specs=dst, check_vma=check_vma, axis_names=manual_axes)(x)
def _check_names(specs, avals: Sequence[core.ShapedArray]) -> None:
fail = [a if sp and len(sp) > a.ndim else no_fail
for sp, a in zip(specs, avals)]
if any(f is not no_fail for f in fail):
raise _SpecError(fail)
| ShardMapPrimitive |
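The row above covers MLIR lowering internals behind shard_map; a minimal user-level sketch of the public API those rules serve (import path and mesh setup are assumptions that vary across JAX versions) might look like:
# Hedged sketch, not from the source row: exercises the public shard_map API
# whose lowering rules appear above. Assumes at least one visible device and
# that the array size divides evenly across the mesh axis.
from functools import partial
import jax
import jax.numpy as jnp
from jax.sharding import Mesh, PartitionSpec as P
from jax.experimental.shard_map import shard_map  # newer JAX also exposes jax.shard_map

mesh = Mesh(jax.devices(), axis_names=("i",))

@partial(shard_map, mesh=mesh, in_specs=P("i"), out_specs=P("i"))
def double(block):
    # `block` is the per-device shard; the lowering rules above wire the
    # full-to-shard / shard-to-full conversions around this body.
    return 2 * block

x = jnp.arange(8.0)
print(double(x))  # [0. 2. 4. ... 14.]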
python | Textualize__textual | src/textual/dom.py | {
"start": 3418,
"end": 3503
} | class ____(DOMError):
"""Raised when the node has no associated screen."""
| NoScreen |
python | dagster-io__dagster | examples/project_fully_featured/project_fully_featured/resources/parquet_io_manager.py | {
"start": 361,
"end": 2608
} | class ____(ConfigurableIOManager):
"""This IOManager will take in a pandas or pyspark dataframe and store it in parquet at the
specified path.
It stores outputs for different partitions in different filepaths.
Downstream ops can either load this dataframe into a spark session or simply retrieve a path
to where the data is stored.
"""
pyspark: ResourceDependency[PySparkResource]
@property
def _base_path(self):
raise NotImplementedError()
def handle_output(self, context: OutputContext, obj: Union[pandas.DataFrame, PySparkDataFrame]):
path = self._get_path(context)
if "://" not in self._base_path:
os.makedirs(os.path.dirname(path), exist_ok=True)
if isinstance(obj, pandas.DataFrame):
row_count = len(obj)
context.log.info(f"Row count: {row_count}")
obj.to_parquet(path=path, index=False)
elif isinstance(obj, PySparkDataFrame):
row_count = obj.count()
obj.write.parquet(path=path, mode="overwrite")
else:
raise Exception(f"Outputs of type {type(obj)} not supported.")
context.add_output_metadata({"row_count": row_count, "path": path})
def load_input(self, context) -> Union[PySparkDataFrame, str]:
path = self._get_path(context)
if context.dagster_type.typing_type == PySparkDataFrame:
# return pyspark dataframe
return self.pyspark.spark_session.read.parquet(path)
return check.failed(
f"Inputs of type {context.dagster_type} not supported. Please specify a valid type "
"for this input either on the argument of the @asset-decorated function."
)
def _get_path(self, context: Union[InputContext, OutputContext]):
key = context.asset_key.path[-1]
if context.has_asset_partitions:
start, end = context.asset_partitions_time_window
dt_format = "%Y%m%d%H%M%S"
partition_str = start.strftime(dt_format) + "_" + end.strftime(dt_format)
return os.path.join(self._base_path, key, f"{partition_str}.pq")
else:
return os.path.join(self._base_path, f"{key}.pq")
| PartitionedParquetIOManager |
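A minimal sketch (assumed names, not from the repo) of how the abstract `_base_path` hook above might be satisfied by a local-filesystem subclass:
# Hypothetical subclass for local storage; `base_path` is an assumed config
# field, and the pyspark resource wiring is inherited from the base above.
class LocalPartitionedParquetIOManager(PartitionedParquetIOManager):
    base_path: str = "/tmp/dagster_parquet"

    @property
    def _base_path(self):
        return self.base_path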
python | google__jax | tests/key_reuse_test.py | {
"start": 2476,
"end": 11124
} | class ____(jtu.JaxTestCase):
def check_key_reuse(self, *args):
return _core.check_key_reuse(*args)
def test_assertions(self):
key = jax.random.key(0)
self.check_key_reuse(assert_unconsumed, key)
with self.assertRaises(AssertionError):
self.check_key_reuse(assert_consumed, key)
def test_unknown(self):
def f(key):
assert_unconsumed(key)
key2 = apply_unknown_primitive(key)
assert_consumed(key)
assert_consumed(key2)
self.check_key_reuse(f, jax.random.key(0))
def test_consume(self):
def f(key):
assert_unconsumed(key)
key2 = consume(key)
assert_consumed(key)
assert_consumed(key2)
self.check_key_reuse(f, jax.random.key(0))
def test_random_clone(self):
def f(key):
assert_unconsumed(key)
consume(key)
assert_consumed(key)
key2 = jax.random.clone(key)
assert_unconsumed(key2)
self.check_key_reuse(f, jax.random.key(0))
def test_seed(self):
def f():
key = jax.random.key(0)
assert_unconsumed(key)
self.check_key_reuse(f)
def test_split(self):
def f(key):
assert_unconsumed(key)
key2 = jax.random.split(key)
assert_unconsumed(key2)
assert_consumed(key)
self.check_key_reuse(f, jax.random.key(0))
def test_fold_in(self):
def f(key):
assert_unconsumed(key)
key2 = jax.random.fold_in(key, 2)
assert_unconsumed(key)
assert_unconsumed(key2)
self.check_key_reuse(f, jax.random.key(0))
def test_bits(self):
def f(key):
assert_unconsumed(key)
bits = jax.random.bits(key, (), 'uint32')
assert_consumed(key)
return bits
self.check_key_reuse(f, jax.random.key(0))
def test_wrap(self):
def f(key_data):
key = jax.random.wrap_key_data(key_data)
assert_unconsumed(key)
self.check_key_reuse(f, jax.random.PRNGKey(0))
def test_unwrap(self):
def f(key):
assert_unconsumed(key)
key_data = jax.random.key_data(key)
assert_unconsumed(key)
self.check_key_reuse(f, jax.random.key(0))
def test_gamma(self):
def f(key):
assert_unconsumed(key)
values = jax.random.gamma(key, 1.0)
assert_consumed(key)
return values
self.check_key_reuse(f, jax.random.key(0))
def test_broadcast_in_dim(self):
def f(key):
assert_unconsumed(key)
key2 = key[None]
assert_unconsumed(key)
assert_unconsumed(key2)
consume(key)
assert_consumed(key)
assert_consumed(key2)
self.check_key_reuse(f, jax.random.key(0))
def test_copy(self):
def f(key):
assert_unconsumed(key)
key2 = jnp.array(key, copy=True)
assert_unconsumed(key)
assert_unconsumed(key2)
consume(key)
assert_consumed(key)
assert_consumed(key2)
self.check_key_reuse(f, jax.random.key(0))
def test_device_put(self):
def f(key):
assert_unconsumed(key)
key_d = jax.device_put(key)
assert_unconsumed(key_d)
consume(key)
assert_consumed(key_d)
self.check_key_reuse(f, jax.random.key(0))
def test_device_put_multiple(self):
def f(key1, key2):
assert_unconsumed(key1)
assert_unconsumed(key2)
key1_d, key2_d = jax.device_put((key1, key2))
assert_unconsumed(key1_d)
consume(key1)
assert_consumed(key1_d)
assert_unconsumed(key2_d)
consume(key2)
assert_consumed(key2_d)
self.check_key_reuse(f, jax.random.key(0), jax.random.key(1))
def test_squeeze(self):
def f(key):
assert_unconsumed(key)
key2 = jax.lax.squeeze(key, (0,))
assert_unconsumed(key)
assert_unconsumed(key2)
consume(key)
assert_consumed(key)
assert_consumed(key2)
self.check_key_reuse(f, jax.random.key(0)[None])
def test_reshape(self):
def f(key):
assert_unconsumed(key)
key2 = key.reshape(1, *key.shape)
assert_unconsumed(key)
assert_unconsumed(key2)
consume(key)
assert_consumed(key)
assert_consumed(key2)
self.check_key_reuse(f, jax.random.key(0))
def test_concatenate(self):
def f(key1, key2):
assert_unconsumed(key1)
assert_unconsumed(key2)
keys = jax.lax.concatenate([key1, key2], dimension=0)
assert_consumed(key1)
assert_consumed(key2)
assert_unconsumed(keys)
key1 = jax.random.split(jax.random.key(0))
key2 = jax.random.split(jax.random.key(1))
self.check_key_reuse(f, key1, key2)
def test_slice(self):
def f(keys):
assert_unconsumed(keys)
assert_unconsumed(keys[0])
assert_consumed(keys, np.array([True, False]))
assert_unconsumed(keys[1])
assert_consumed(keys, np.array([True, True]))
self.check_key_reuse(f, jax.random.split(jax.random.key(0)))
@parameterized.parameters(operator.eq, operator.ne)
def test_equality_checks(self, op):
def f(key1, key2):
assert_unconsumed(key1)
assert_unconsumed(key2)
result = op(key1, key2)
assert_unconsumed(key1)
assert_unconsumed(key2)
return result
self.check_key_reuse(f, jax.random.key(0), jax.random.key(1))
def test_jit_can_consume_input(self):
def f(key):
assert_unconsumed(key)
ans = jax.jit(jax.random.bits)(key)
assert_consumed(key)
return ans
self.check_key_reuse(f, jax.random.key(0))
def test_jit_can_return_consumed_output(self):
def f():
def g():
key = jax.random.key(0)
assert_unconsumed(key)
bits = jax.random.bits(key)
assert_consumed(key)
return bits, key
_, key = jax.jit(g)()
assert_consumed(key)
self.check_key_reuse(f)
def test_jit_duplicate_inputs(self):
def f(key):
assert_unconsumed(key)
def g(key1, key2):
assert_unconsumed(key1)
assert_unconsumed(key2)
return jax.random.bits(key1)
_ = jax.jit(g)(key, key)
assert_consumed(key)
self.check_key_reuse(f, jax.random.key(0))
def test_jit_propagates_consumption_bit(self):
def f(key):
assert_unconsumed(key)
g = jax.jit(lambda: key)
key2 = g()
assert_unconsumed(key)
assert_unconsumed(key2)
consume(key)
assert_consumed(key)
assert_consumed(key2)
self.check_key_reuse(f, jax.random.key(0))
def test_jit_duplicate_outputs(self):
# TODO(jakevdp): implement this case
def f(key):
assert_unconsumed(key)
def g(key):
return key, key
key1, key2 = jax.jit(g)(key)
assert_unconsumed(key)
assert_unconsumed(key1)
assert_unconsumed(key2)
other = jax.random.bits(key1)
assert_consumed(key)
assert_consumed(key1)
assert_consumed(key2)
return (key1, key2, other)
self.check_key_reuse(f, jax.random.key(0))
def test_cond_both_consumed(self):
@jax.jit
def f(flag, key):
assert_unconsumed(key)
ans = jax.lax.cond(
flag, jax.random.uniform, jax.random.normal, key)
assert_consumed(key)
return ans
self.check_key_reuse(f, True, jax.random.key(0))
def test_cond_one_consumed(self):
@jax.jit
def f(flag, key):
assert_unconsumed(key)
ans = jax.lax.cond(
flag, jax.random.uniform, lambda k: 1.0, key)
assert_consumed(key)
return ans
self.check_key_reuse(f, True, jax.random.key(0))
def test_cond_neither_consumed(self):
@jax.jit
def f(flag, key):
assert_unconsumed(key)
_ = jax.lax.cond(
flag, lambda k: 0.0, lambda k: 1.0, key)
assert_unconsumed(key)
self.check_key_reuse(f, True, jax.random.key(0))
def test_simple_vmap(self):
@jax.jit
def f(seed):
key = jax.random.key(seed)
assert_unconsumed(key)
result = jax.random.uniform(key)
assert_consumed(key)
return result
self.check_key_reuse(f, 0)
self.check_key_reuse(jax.vmap(f), jnp.arange(4))
@parameterized.parameters(*primitives_with_static_signatures)
def test_jaxpr_type_signature(self, primitive):
func, *args = primitives_with_static_signatures[primitive]
signature = _core.key_reuse_signatures[primitive]
jaxpr = jax.make_jaxpr(func)(*args)
self.assertEqual(signature, _core.jaxpr_type_signature(jaxpr.jaxpr))
@parameterized.parameters(*primitives_with_static_signatures)
def test_function_type_signature(self, primitive):
func, *args = primitives_with_static_signatures[primitive]
signature = _core.key_reuse_signatures[primitive]
self.assertEqual(signature, _core.function_type_signature(func, *args))
@jtu.with_config(jax_debug_key_reuse=False)
| KeyReuseUnitTestWithForwarding |
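Outside these unit tests, the same checker is reachable through the public config flag; a rough sketch (behavior is experimental and may vary across JAX versions):
# Hedged sketch: relies on the experimental jax_debug_key_reuse flag and
# jax.errors.KeyReuseError, both assumed available in the installed JAX.
import jax

jax.config.update("jax_debug_key_reuse", True)
key = jax.random.key(0)
_ = jax.random.bits(key)       # first use consumes the key
try:
    _ = jax.random.bits(key)   # second use should trip the reuse checker
except jax.errors.KeyReuseError as e:
    print("reuse detected:", e)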
python | django__django | django/core/exceptions.py | {
"start": 1536,
"end": 1718
} | class ____(SuspiciousOperation):
"""
The size of the request (excluding any file uploads) exceeded
settings.DATA_UPLOAD_MAX_MEMORY_SIZE.
"""
pass
| RequestDataTooBig |
python | sphinx-doc__sphinx | sphinx/domains/cpp/_ast.py | {
"start": 34041,
"end": 35169
} | class ____(ASTExpression):
def __init__(self, identifier: ASTIdentifier) -> None:
self.identifier = identifier
def __eq__(self, other: object) -> bool:
if not isinstance(other, ASTSizeofParamPack):
return NotImplemented
return self.identifier == other.identifier
def __hash__(self) -> int:
return hash(self.identifier)
def _stringify(self, transform: StringifyTransform) -> str:
return 'sizeof...(' + transform(self.identifier) + ')'
def get_id(self, version: int) -> str:
return 'sZ' + self.identifier.get_id(version)
def describe_signature(
self, signode: TextElement, mode: str, env: BuildEnvironment, symbol: Symbol
) -> None:
signode += addnodes.desc_sig_keyword('sizeof', 'sizeof')
signode += addnodes.desc_sig_punctuation('...', '...')
signode += addnodes.desc_sig_punctuation('(', '(')
self.identifier.describe_signature(
signode, 'markType', env, symbol=symbol, prefix='', templateArgs=''
)
signode += addnodes.desc_sig_punctuation(')', ')')
| ASTSizeofParamPack |
python | django__django | tests/apps/query_performing_app/apps.py | {
"start": 904,
"end": 1203
} | class ____(BaseAppConfig):
def _perform_query(self):
connection = connections[self.database]
with connection.cursor() as cursor:
cursor.execute("SELECT 42" + connection.features.bare_select_suffix)
self.query_results = cursor.fetchall()
| CursorQueryAppConfig |
python | kamyu104__LeetCode-Solutions | Python/number-of-possible-sets-of-closing-branches.py | {
"start": 101,
"end": 1408
} | class ____(object):
def numberOfSets(self, n, maxDistance, roads):
"""
:type n: int
:type maxDistance: int
:type roads: List[List[int]]
:rtype: int
"""
def check(mask, dist):
return all(dist[i][j] <= maxDistance for i in xrange(n) if mask&(1<<i) for j in xrange(i+1, n) if mask&(1<<j))
def floydWarshall(dist, k):
for i in xrange(len(dist)):
for j in xrange(i+1, len(dist[i])):
dist[j][i] = dist[i][j] = min(dist[i][j], dist[i][k]+dist[k][j])
def backtracking(i, mask, dist):
if i == n:
result[0] += check(mask, dist)
return
for j in xrange(2):
new_dist = [d[:] for d in dist]
if j:
floydWarshall(new_dist, i)
backtracking(i+1, mask|(j<<i), new_dist)
dist = [[0 if u == v else float("inf") for v in xrange(n)] for u in xrange(n)]
for u, v, w in roads:
dist[u][v] = min(dist[u][v], w)
dist[v][u] = min(dist[v][u], w)
result = [0]
backtracking(0, 0, [d[:] for d in dist])
return result[0]
# Time: O(r + 2^n * n^3)
# Space: O(n^2)
# bitmasks, Floyd-Warshall algorithm
| Solution |
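A quick check against the problem's published example; note the solution above uses xrange and therefore assumes a Python 2 runtime:
# Assumes Python 2 (xrange above); per the published example,
# n=3, maxDistance=5, roads=[[0,1,2],[1,2,10],[0,2,10]] -> 5 closable sets.
print(Solution().numberOfSets(3, 5, [[0, 1, 2], [1, 2, 10], [0, 2, 10]]))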
python | tensorflow__tensorflow | tensorflow/python/ops/array_ops_shape_test.py | {
"start": 907,
"end": 2069
} | class ____(test.TestCase):
def testShapeInt64Flag(self):
# The tf_shape_default_int64 flag should be set when this test runs
self.assertTrue(flags.config().tf_shape_default_int64.value())
s1 = array_ops.shape_v2(array_ops.zeros([1, 2]))
self.assertEqual(s1.dtype, dtypes.int64)
def testShapeInt64FlagTf1(self):
# The tf_shape_default_int64 flag should be set when this test runs
self.assertTrue(flags.config().tf_shape_default_int64.value())
s1 = array_ops.shape(array_ops.zeros([1, 2]))
self.assertEqual(s1.dtype, dtypes.int64)
def testSizeInt64Flag(self):
# The tf_shape_default_int64 flag should be set when this test runs
self.assertTrue(flags.config().tf_shape_default_int64.value())
s1 = array_ops.size_v2(array_ops.zeros([1, 2]))
self.assertEqual(s1.dtype, dtypes.int64)
def testSizeInt64FlagTf1(self):
# The tf_shape_default_int64 flag should be set when this test runs
self.assertTrue(flags.config().tf_shape_default_int64.value())
s1 = array_ops.size(array_ops.zeros([1, 2]))
self.assertEqual(s1.dtype, dtypes.int64)
if __name__ == "__main__":
test.main()
| ArrayOpShapeSizeTest |
python | tornadoweb__tornado | tornado/test/websocket_test.py | {
"start": 3528,
"end": 3617
} | class ____(RequestHandler):
def get(self):
self.write("ok")
| NonWebSocketHandler |
python | Netflix__metaflow | metaflow/user_configs/config_options.py | {
"start": 2190,
"end": 3278
} | class ____(click.ParamType):
name = "ConvertDictOrStr"
def convert(self, value, param, ctx):
is_default = False
if isinstance(value, str):
if value.startswith(_CONVERT_PREFIX):
return value
if value.startswith(_DEFAULT_PREFIX):
is_default = True
value = value[len(_DEFAULT_PREFIX) :]
return self.convert_value(value, is_default)
@staticmethod
def convert_value(value, is_default):
default_str = _DEFAULT_PREFIX if is_default else ""
if value is None:
return None
if isinstance(value, dict):
return _CONVERT_PREFIX + default_str + json.dumps(value)
if value.startswith(_CONVERT_PREFIX):
return value
return _CONVERT_PREFIX + default_str + value
@staticmethod
def mark_as_default(value):
if value is None:
return None
if isinstance(value, dict):
return _DEFAULT_PREFIX + json.dumps(value)
return _DEFAULT_PREFIX + str(value)
| ConvertDictOrStr |
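The prefix constants (`_CONVERT_PREFIX`, `_DEFAULT_PREFIX`) are module-level values not shown in this excerpt; with that caveat, the marking round-trip looks roughly like:
# Sketch only: the concrete prefix values are whatever the module defines.
marked = ConvertDictOrStr.mark_as_default({"alpha": 1})
# -> _DEFAULT_PREFIX + '{"alpha": 1}'
converted = ConvertDictOrStr.convert_value("some_value", is_default=True)
# -> _CONVERT_PREFIX + _DEFAULT_PREFIX + "some_value"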
python | PyCQA__pylint | tests/functional/a/abstract/abstract_method.py | {
"start": 365,
"end": 647
} | class ____(Abstract):
"""Abstract class.
this class is checking that it does not output an error msg for
unimplemeted methods in abstract classes
"""
def cccc(self):
"""should be overridden in concrete class"""
raise NotImplementedError()
| AbstractB |
python | plotly__plotly.py | plotly/graph_objs/isosurface/colorbar/_tickfont.py | {
"start": 233,
"end": 9933
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "isosurface.colorbar"
_path_str = "isosurface.colorbar.tickfont"
_valid_props = {
"color",
"family",
"lineposition",
"shadow",
"size",
"style",
"textcase",
"variant",
"weight",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
Returns
-------
Any
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
Returns
-------
Any
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
Returns
-------
Any
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
Returns
-------
int
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
"""
def __init__(
self,
arg=None,
color=None,
family=None,
lineposition=None,
shadow=None,
size=None,
style=None,
textcase=None,
variant=None,
weight=None,
**kwargs,
):
"""
Construct a new Tickfont object
Sets the color bar's tick label font
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.isosurface.colorbar.Tickfont`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
Returns
-------
Tickfont
"""
super().__init__("tickfont")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.isosurface.colorbar.Tickfont
constructor must be a dict or
an instance of :class:`plotly.graph_objs.isosurface.colorbar.Tickfont`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("family", arg, family)
self._set_property("lineposition", arg, lineposition)
self._set_property("shadow", arg, shadow)
self._set_property("size", arg, size)
self._set_property("style", arg, style)
self._set_property("textcase", arg, textcase)
self._set_property("variant", arg, variant)
self._set_property("weight", arg, weight)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Tickfont |
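A small construction example (standard plotly usage, not taken from this file; the data values are arbitrary):
# Builds the tick label font for an isosurface colorbar.
import plotly.graph_objects as go

tickfont = go.isosurface.colorbar.Tickfont(
    family="Arial, sans-serif", size=12, color="#444", weight="bold"
)
fig = go.Figure(go.Isosurface(
    x=[0, 0, 0, 0, 1, 1, 1, 1],
    y=[0, 1, 0, 1, 0, 1, 0, 1],
    z=[1, 1, 0, 0, 1, 1, 0, 0],
    value=[1, 2, 3, 4, 5, 6, 7, 8],
    colorbar={"tickfont": tickfont},
))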
python | doocs__leetcode | solution/3500-3599/3512.Minimum Operations to Make Array Sum Divisible by K/Solution.py | {
"start": 0,
"end": 106
} | class ____:
def minOperations(self, nums: List[int], k: int) -> int:
return sum(nums) % k
| Solution |
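The one-liner works because each operation changes the sum by exactly 1, so the minimum number of operations is the remainder of the sum modulo k; e.g. nums=[3, 9, 7], k=5 gives sum 19 and 19 % 5 = 4:
# sum([3, 9, 7]) == 19, and 19 % 5 == 4
assert Solution().minOperations([3, 9, 7], 5) == 4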
python | kamyu104__LeetCode-Solutions | Python/find-array-given-subset-sums.py | {
"start": 7009,
"end": 8125
} | class ____(object):
def recoverArray(self, n, sums):
"""
:type n: int
:type sums: List[int]
:rtype: List[int]
"""
dp = OrderedDict(sorted(collections.Counter(sums).iteritems())) # Time: O(2^n * log(2^n)) = O(n * 2^n)
shift = 0
result = []
for _ in xrange(n): # log(2^n) times, each time costs O(2^(n-len(result))), Total Time: O(2^n)
new_dp = OrderedDict()
it = iter(dp)
min_sum = next(it)
new_shift = min_sum-next(it) if dp[min_sum] == 1 else 0
assert(new_shift <= 0)
for x in dp.iterkeys():
if not dp[x]:
continue
dp[x-new_shift] -= dp[x] if new_shift else dp[x]//2
new_dp[x-new_shift] = dp[x]
dp = new_dp
if shift in dp: # contain 0, choose this side
result.append(new_shift)
else: # contain no 0, choose another side and shift 0 offset
result.append(-new_shift)
shift -= new_shift
return result
| Solution5 |
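A usage check against the problem's canonical example (Python 2 assumed, given the iteritems/iterkeys above); any sign- or order-equivalent answer is acceptable:
# n=3, sums=[-3,-2,-1,0,0,1,2,3]; one valid recovery is [1, 2, -3]
# (any array whose subset sums match is accepted).
print(Solution5().recoverArray(3, [-3, -2, -1, 0, 0, 1, 2, 3]))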
python | pypa__pipenv | pipenv/vendor/click/testing.py | {
"start": 4241,
"end": 16084
} | class ____:
"""The CLI runner provides functionality to invoke a Click command line
script for unit-testing purposes in an isolated environment. This only
works in single-threaded systems without any concurrency as it changes the
global interpreter state.
:param charset: the character set for the input and output data.
:param env: a dictionary with environment variables for overriding.
:param echo_stdin: if this is set to `True`, then reading from stdin writes
to stdout. This is useful for showing examples in
some circumstances. Note that regular prompts
will automatically echo the input.
:param mix_stderr: if this is set to `False`, then stdout and stderr are
preserved as independent streams. This is useful for
Unix-philosophy apps that have predictable stdout and
noisy stderr, such that each may be measured
independently
"""
def __init__(
self,
charset: str = "utf-8",
env: t.Optional[t.Mapping[str, t.Optional[str]]] = None,
echo_stdin: bool = False,
mix_stderr: bool = True,
) -> None:
self.charset = charset
self.env: t.Mapping[str, t.Optional[str]] = env or {}
self.echo_stdin = echo_stdin
self.mix_stderr = mix_stderr
def get_default_prog_name(self, cli: "BaseCommand") -> str:
"""Given a command object it will return the default program name
for it. The default is the `name` attribute or ``"root"`` if not
set.
"""
return cli.name or "root"
def make_env(
self, overrides: t.Optional[t.Mapping[str, t.Optional[str]]] = None
) -> t.Mapping[str, t.Optional[str]]:
"""Returns the environment overrides for invoking a script."""
rv = dict(self.env)
if overrides:
rv.update(overrides)
return rv
@contextlib.contextmanager
def isolation(
self,
input: t.Optional[t.Union[str, bytes, t.IO[t.Any]]] = None,
env: t.Optional[t.Mapping[str, t.Optional[str]]] = None,
color: bool = False,
) -> t.Iterator[t.Tuple[io.BytesIO, t.Optional[io.BytesIO]]]:
"""A context manager that sets up the isolation for invoking of a
command line tool. This sets up stdin with the given input data
and `os.environ` with the overrides from the given dictionary.
This also rebinds some internals in Click to be mocked (like the
prompt functionality).
This is automatically done in the :meth:`invoke` method.
:param input: the input stream to put into sys.stdin.
:param env: the environment overrides as dictionary.
:param color: whether the output should contain color codes. The
application can still override this explicitly.
.. versionchanged:: 8.0
``stderr`` is opened with ``errors="backslashreplace"``
instead of the default ``"strict"``.
.. versionchanged:: 4.0
Added the ``color`` parameter.
"""
bytes_input = make_input_stream(input, self.charset)
echo_input = None
old_stdin = sys.stdin
old_stdout = sys.stdout
old_stderr = sys.stderr
old_forced_width = formatting.FORCED_WIDTH
formatting.FORCED_WIDTH = 80
env = self.make_env(env)
bytes_output = io.BytesIO()
if self.echo_stdin:
bytes_input = echo_input = t.cast(
t.BinaryIO, EchoingStdin(bytes_input, bytes_output)
)
sys.stdin = text_input = _NamedTextIOWrapper(
bytes_input, encoding=self.charset, name="<stdin>", mode="r"
)
if self.echo_stdin:
# Force unbuffered reads, otherwise TextIOWrapper reads a
# large chunk which is echoed early.
text_input._CHUNK_SIZE = 1 # type: ignore
sys.stdout = _NamedTextIOWrapper(
bytes_output, encoding=self.charset, name="<stdout>", mode="w"
)
bytes_error = None
if self.mix_stderr:
sys.stderr = sys.stdout
else:
bytes_error = io.BytesIO()
sys.stderr = _NamedTextIOWrapper(
bytes_error,
encoding=self.charset,
name="<stderr>",
mode="w",
errors="backslashreplace",
)
@_pause_echo(echo_input) # type: ignore
def visible_input(prompt: t.Optional[str] = None) -> str:
sys.stdout.write(prompt or "")
val = text_input.readline().rstrip("\r\n")
sys.stdout.write(f"{val}\n")
sys.stdout.flush()
return val
@_pause_echo(echo_input) # type: ignore
def hidden_input(prompt: t.Optional[str] = None) -> str:
sys.stdout.write(f"{prompt or ''}\n")
sys.stdout.flush()
return text_input.readline().rstrip("\r\n")
@_pause_echo(echo_input) # type: ignore
def _getchar(echo: bool) -> str:
char = sys.stdin.read(1)
if echo:
sys.stdout.write(char)
sys.stdout.flush()
return char
default_color = color
def should_strip_ansi(
stream: t.Optional[t.IO[t.Any]] = None, color: t.Optional[bool] = None
) -> bool:
if color is None:
return not default_color
return not color
old_visible_prompt_func = termui.visible_prompt_func
old_hidden_prompt_func = termui.hidden_prompt_func
old__getchar_func = termui._getchar
old_should_strip_ansi = utils.should_strip_ansi # type: ignore
termui.visible_prompt_func = visible_input
termui.hidden_prompt_func = hidden_input
termui._getchar = _getchar
utils.should_strip_ansi = should_strip_ansi # type: ignore
old_env = {}
try:
for key, value in env.items():
old_env[key] = os.environ.get(key)
if value is None:
try:
del os.environ[key]
except Exception:
pass
else:
os.environ[key] = value
yield (bytes_output, bytes_error)
finally:
for key, value in old_env.items():
if value is None:
try:
del os.environ[key]
except Exception:
pass
else:
os.environ[key] = value
sys.stdout = old_stdout
sys.stderr = old_stderr
sys.stdin = old_stdin
termui.visible_prompt_func = old_visible_prompt_func
termui.hidden_prompt_func = old_hidden_prompt_func
termui._getchar = old__getchar_func
utils.should_strip_ansi = old_should_strip_ansi # type: ignore
formatting.FORCED_WIDTH = old_forced_width
def invoke(
self,
cli: "BaseCommand",
args: t.Optional[t.Union[str, t.Sequence[str]]] = None,
input: t.Optional[t.Union[str, bytes, t.IO[t.Any]]] = None,
env: t.Optional[t.Mapping[str, t.Optional[str]]] = None,
catch_exceptions: bool = True,
color: bool = False,
**extra: t.Any,
) -> Result:
"""Invokes a command in an isolated environment. The arguments are
forwarded directly to the command line script, the `extra` keyword
arguments are passed to the :meth:`~clickpkg.Command.main` function of
the command.
This returns a :class:`Result` object.
:param cli: the command to invoke
:param args: the arguments to invoke. It may be given as an iterable
or a string. When given as string it will be interpreted
as a Unix shell command. More details at
:func:`shlex.split`.
:param input: the input data for `sys.stdin`.
:param env: the environment overrides.
:param catch_exceptions: Whether to catch any other exceptions than
``SystemExit``.
:param extra: the keyword arguments to pass to :meth:`main`.
:param color: whether the output should contain color codes. The
application can still override this explicitly.
.. versionchanged:: 8.0
The result object has the ``return_value`` attribute with
the value returned from the invoked command.
.. versionchanged:: 4.0
Added the ``color`` parameter.
.. versionchanged:: 3.0
Added the ``catch_exceptions`` parameter.
.. versionchanged:: 3.0
The result object has the ``exc_info`` attribute with the
traceback if available.
"""
exc_info = None
with self.isolation(input=input, env=env, color=color) as outstreams:
return_value = None
exception: t.Optional[BaseException] = None
exit_code = 0
if isinstance(args, str):
args = shlex.split(args)
try:
prog_name = extra.pop("prog_name")
except KeyError:
prog_name = self.get_default_prog_name(cli)
try:
return_value = cli.main(args=args or (), prog_name=prog_name, **extra)
except SystemExit as e:
exc_info = sys.exc_info()
e_code = t.cast(t.Optional[t.Union[int, t.Any]], e.code)
if e_code is None:
e_code = 0
if e_code != 0:
exception = e
if not isinstance(e_code, int):
sys.stdout.write(str(e_code))
sys.stdout.write("\n")
e_code = 1
exit_code = e_code
except Exception as e:
if not catch_exceptions:
raise
exception = e
exit_code = 1
exc_info = sys.exc_info()
finally:
sys.stdout.flush()
stdout = outstreams[0].getvalue()
if self.mix_stderr:
stderr = None
else:
stderr = outstreams[1].getvalue() # type: ignore
return Result(
runner=self,
stdout_bytes=stdout,
stderr_bytes=stderr,
return_value=return_value,
exit_code=exit_code,
exception=exception,
exc_info=exc_info, # type: ignore
)
@contextlib.contextmanager
def isolated_filesystem(
self, temp_dir: t.Optional[t.Union[str, "os.PathLike[str]"]] = None
) -> t.Iterator[str]:
"""A context manager that creates a temporary directory and
changes the current working directory to it. This isolates tests
that affect the contents of the CWD to prevent them from
interfering with each other.
:param temp_dir: Create the temporary directory under this
directory. If given, the created directory is not removed
when exiting.
.. versionchanged:: 8.0
Added the ``temp_dir`` parameter.
"""
cwd = os.getcwd()
dt = tempfile.mkdtemp(dir=temp_dir)
os.chdir(dt)
try:
yield dt
finally:
os.chdir(cwd)
if temp_dir is None:
try:
shutil.rmtree(dt)
except OSError: # noqa: B014
pass
| CliRunner |
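Standard usage of this class (imports shown against upstream click; in this vendored copy the module path is pipenv.vendor.click.testing):
import click
from click.testing import CliRunner  # vendored path: pipenv.vendor.click.testing

@click.command()
@click.argument("name")
def hello(name):
    click.echo(f"Hello {name}!")

runner = CliRunner()
result = runner.invoke(hello, ["world"])
assert result.exit_code == 0
assert result.output == "Hello world!\n"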
python | wandb__wandb | wandb/automations/_filters/expressions.py | {
"start": 612,
"end": 4164
} | class ____:
"""A descriptor that can be used to define a "filterable" field on a class.
Internal helper to support syntactic sugar for defining event filters.
"""
_python_name: str #: The name of the field this descriptor was assigned to in the Python class.
_server_name: str | None #: If set, the actual server-side field name to filter on.
def __init__(self, server_name: str | None = None):
self._server_name = server_name
def __set_name__(self, owner: type, name: str) -> None:
self._python_name = name
def __get__(self, obj: Any, objtype: type) -> Self:
# By default, if we didn't explicitly provide a backend name for
# filtering, assume the field has the same name in the backend as
# the python attribute.
return self
@property
def _name(self) -> str:
return self._server_name or self._python_name
def __str__(self) -> str:
return self._name
def __repr__(self) -> str:
return f"{nameof(type(self))}({self._name!r})"
# Methods to define filter expressions through chaining
def matches_regex(self, pattern: str, /) -> FilterExpr:
return FilterExpr(field=self._name, op=Regex(val=pattern))
def contains(self, text: str, /) -> FilterExpr:
return FilterExpr(field=self._name, op=Contains(val=text))
def exists(self, exists: bool = True, /) -> FilterExpr:
return FilterExpr(field=self._name, op=Exists(val=exists))
def lt(self, value: Scalar, /) -> FilterExpr:
return FilterExpr(field=self._name, op=Lt(val=value))
def gt(self, value: Scalar, /) -> FilterExpr:
return FilterExpr(field=self._name, op=Gt(val=value))
def lte(self, value: Scalar, /) -> FilterExpr:
return FilterExpr(field=self._name, op=Lte(val=value))
def gte(self, value: Scalar, /) -> FilterExpr:
return FilterExpr(field=self._name, op=Gte(val=value))
def ne(self, value: Scalar, /) -> FilterExpr:
return FilterExpr(field=self._name, op=Ne(val=value))
def eq(self, value: Scalar, /) -> FilterExpr:
return FilterExpr(field=self._name, op=Eq(val=value))
def in_(self, values: Iterable[Scalar], /) -> FilterExpr:
return FilterExpr(field=self._name, op=In(val=values))
def not_in(self, values: Iterable[Scalar], /) -> FilterExpr:
return FilterExpr(field=self._name, op=NotIn(val=values))
# Deliberately override the default behavior of comparison operator symbols,
# (`<`, `>`, `<=`, `>=`, `==`, `!=`), to allow defining filter expressions
# idiomatically, e.g. `field == "value"`.
#
# See similar overrides of built-in dunder methods in common libraries like
# `sqlalchemy`, `polars`, `pandas`, `numpy`, etc.
#
# As an illustrative example from `sqlalchemy`, see:
# https://github.com/sqlalchemy/sqlalchemy/blob/f21ae633486380a26dc0b67b70ae1c0efc6b4dc4/lib/sqlalchemy/orm/descriptor_props.py#L808-L812
def __lt__(self, other: Any) -> FilterExpr:
return self.lt(other)
def __gt__(self, other: Any) -> FilterExpr:
return self.gt(other)
def __le__(self, other: Any) -> FilterExpr:
return self.lte(other)
def __ge__(self, other: Any) -> FilterExpr:
return self.gte(other)
def __eq__(self, other: Any) -> FilterExpr:
return self.eq(other)
def __ne__(self, other: Any) -> FilterExpr:
return self.ne(other)
# ------------------------------------------------------------------------------
| FilterableField |
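A minimal sketch of the descriptor in use; `MyEvent` and its field are hypothetical, and FilterExpr/Eq/In come from the same module:
# The descriptor turns comparison operators into FilterExpr nodes.
class MyEvent:
    user = FilterableField("entity_name")  # filters on server field "entity_name"

expr = MyEvent.user == "alice"            # FilterExpr(field="entity_name", op=Eq(val="alice"))
expr_in = MyEvent.user.in_(["a", "b"])    # FilterExpr(field="entity_name", op=In(val=[...]))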
python | weaviate__weaviate-python-client | weaviate/collections/classes/grpc.py | {
"start": 3166,
"end": 4720
} | class ____:
vector: bool
uuid: bool = True
creation_time_unix: bool = False
last_update_time_unix: bool = False
distance: bool = False
certainty: bool = False
score: bool = False
explain_score: bool = False
is_consistent: bool = False
vectors: Optional[List[str]] = None
@classmethod
def from_public(
cls, public: Optional[MetadataQuery], include_vector: INCLUDE_VECTOR
) -> "_MetadataQuery":
return (
cls(
vector=include_vector if isinstance(include_vector, bool) else False,
vectors=include_vector if isinstance(include_vector, list) else None,
)
if public is None
else cls(
vector=include_vector if isinstance(include_vector, bool) else False,
vectors=include_vector if isinstance(include_vector, list) else None,
creation_time_unix=public.creation_time,
last_update_time_unix=public.last_update_time,
distance=public.distance,
certainty=public.certainty,
score=public.score,
explain_score=public.explain_score,
is_consistent=public.is_consistent,
)
)
METADATA = Union[
List[
Literal[
"creation_time",
"last_update_time",
"distance",
"certainty",
"score",
"explain_score",
"is_consistent",
]
],
MetadataQuery,
]
| _MetadataQuery |
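Roughly how the public-to-internal conversion above gets used (field names per the weaviate client API, hedged against version drift):
# MetadataQuery is the user-facing class; from_public folds in include_vector.
public = MetadataQuery(distance=True, score=True)
internal = _MetadataQuery.from_public(public, include_vector=True)
# -> _MetadataQuery(vector=True, distance=True, score=True, ...defaults...)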
python | spack__spack | lib/spack/spack/error.py | {
"start": 4287,
"end": 4375
} | class ____(SpackError):
"""Raised when a patch file doesn't exist."""
| NoSuchPatchError |
python | dagster-io__dagster | python_modules/dagster/dagster/components/utils/translation.py | {
"start": 6527,
"end": 8324
} | class ____(Generic[T_Component]):
"""To support python versions < 3.10, we need to use a Protocol to tell the type system that
these generated classes have a component property.
"""
def __init__(self, component: T_Component, *args, **kwargs):
self._component = component
super().__init__(*args, **kwargs)
@property
def component(self) -> T_Component:
return self._component
def create_component_translator_cls(
base_component_cls: type[T_Component], base_translator_cls: type[T_Translator]
) -> type[T_Translator]:
class _GeneratedComponentTranslator(base_translator_cls, ComponentTranslator[T_Component]): # type: ignore
def _shim_method(self, method_name: str) -> Callable:
component_base_method = getattr(base_component_cls, method_name)
component_instance_method = getattr(self._component.__class__, method_name)
if component_base_method is not component_instance_method:
# if the user has overridden the method, we invoke the instance method
return functools.partial(component_instance_method, self._component)
else:
# we never invoke the component_base_method directly. instead, if the user
# has not overridden the method, we invoke the original translator class method
return getattr(super(), method_name)
def get_asset_spec(self, *args, **kwargs) -> AssetSpec:
return self._shim_method("get_asset_spec")(*args, **kwargs)
def get_asset_check_spec(self, *args, **kwargs) -> Optional[AssetCheckSpec]:
return self._shim_method("get_asset_check_spec")(*args, **kwargs)
return cast("type[T_Translator]", _GeneratedComponentTranslator)
| ComponentTranslator |
python | pypa__packaging | tests/test_tags.py | {
"start": 14294,
"end": 17476
} | class ____:
@pytest.mark.usefixtures("mock_ios")
def test_version_detection(self) -> None:
platforms = list(tags.ios_platforms(multiarch="arm64-iphoneos"))
assert platforms == [
"ios_13_2_arm64_iphoneos",
"ios_13_1_arm64_iphoneos",
"ios_13_0_arm64_iphoneos",
"ios_12_9_arm64_iphoneos",
"ios_12_8_arm64_iphoneos",
"ios_12_7_arm64_iphoneos",
"ios_12_6_arm64_iphoneos",
"ios_12_5_arm64_iphoneos",
"ios_12_4_arm64_iphoneos",
"ios_12_3_arm64_iphoneos",
"ios_12_2_arm64_iphoneos",
"ios_12_1_arm64_iphoneos",
"ios_12_0_arm64_iphoneos",
]
@pytest.mark.usefixtures("mock_ios")
def test_multiarch_detection(self) -> None:
platforms = list(tags.ios_platforms(version=(12, 0)))
assert platforms == ["ios_12_0_gothic_iphoneos"]
@pytest.mark.usefixtures("mock_ios")
def test_ios_platforms(self) -> None:
# Pre-iOS 12.0 releases won't match anything
platforms = list(tags.ios_platforms((7, 0), "arm64-iphoneos"))
assert platforms == []
# iOS 12.0 returns exactly 1 match
platforms = list(tags.ios_platforms((12, 0), "arm64-iphoneos"))
assert platforms == ["ios_12_0_arm64_iphoneos"]
# iOS 13.0 returns a match for 13.0, plus every 12.X
platforms = list(tags.ios_platforms((13, 0), "x86_64-iphonesimulator"))
assert platforms == [
"ios_13_0_x86_64_iphonesimulator",
"ios_12_9_x86_64_iphonesimulator",
"ios_12_8_x86_64_iphonesimulator",
"ios_12_7_x86_64_iphonesimulator",
"ios_12_6_x86_64_iphonesimulator",
"ios_12_5_x86_64_iphonesimulator",
"ios_12_4_x86_64_iphonesimulator",
"ios_12_3_x86_64_iphonesimulator",
"ios_12_2_x86_64_iphonesimulator",
"ios_12_1_x86_64_iphonesimulator",
"ios_12_0_x86_64_iphonesimulator",
]
# iOS 14.3 returns a match for 14.3-14.0, plus every 13.X and every 12.X
platforms = list(tags.ios_platforms((14, 3), "arm64-iphoneos"))
assert platforms == [
"ios_14_3_arm64_iphoneos",
"ios_14_2_arm64_iphoneos",
"ios_14_1_arm64_iphoneos",
"ios_14_0_arm64_iphoneos",
"ios_13_9_arm64_iphoneos",
"ios_13_8_arm64_iphoneos",
"ios_13_7_arm64_iphoneos",
"ios_13_6_arm64_iphoneos",
"ios_13_5_arm64_iphoneos",
"ios_13_4_arm64_iphoneos",
"ios_13_3_arm64_iphoneos",
"ios_13_2_arm64_iphoneos",
"ios_13_1_arm64_iphoneos",
"ios_13_0_arm64_iphoneos",
"ios_12_9_arm64_iphoneos",
"ios_12_8_arm64_iphoneos",
"ios_12_7_arm64_iphoneos",
"ios_12_6_arm64_iphoneos",
"ios_12_5_arm64_iphoneos",
"ios_12_4_arm64_iphoneos",
"ios_12_3_arm64_iphoneos",
"ios_12_2_arm64_iphoneos",
"ios_12_1_arm64_iphoneos",
"ios_12_0_arm64_iphoneos",
]
| TestIOSPlatforms |
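The function under test is public packaging API; with both arguments supplied no runtime detection is needed, so a sketch like this should also run off-iOS (hedged against the installed packaging version):
from packaging import tags

# Yields "ios_13_0_arm64_iphoneos", then every 12.x fallback, oldest last.
for t in tags.ios_platforms(version=(13, 0), multiarch="arm64-iphoneos"):
    print(t)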
python | walkccc__LeetCode | solutions/1552. Magnetic Force Between Two Balls/1552.py | {
"start": 0,
"end": 499
} | class ____:
def maxDistance(self, position: list[int], m: int) -> int:
position.sort()
l = 1
r = position[-1] - position[0]
def numBalls(force: int) -> int:
balls = 0
prevPosition = -force
for pos in position:
if pos - prevPosition >= force:
balls += 1
prevPosition = pos
return balls
while l < r:
mid = r - (r - l) // 2
if numBalls(mid) >= m:
l = mid
else:
r = mid - 1
return l
| Solution |
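Binary search on the answer: for each candidate force, greedily count how many balls fit, then keep the largest force that still places m balls. The published example checks out:
# position=[1,2,3,4,7], m=3 -> max min-distance is 3 (balls at 1, 4, 7)
assert Solution().maxDistance([1, 2, 3, 4, 7], 3) == 3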
python | allegroai__clearml | clearml/backend_api/services/v2_20/tasks.py | {
"start": 201061,
"end": 224346
} | class ____(Request):
"""
Edit task's details.
:param task: ID of the task
:type task: str
:param force: If not true, call fails if the task status is not 'created'
:type force: bool
:param name: Task name. Unique within the company.
:type name: str
:param tags: User-defined tags list
:type tags: Sequence[str]
:param system_tags: System tags list. This field is reserved for system use,
please don't use it.
:type system_tags: Sequence[str]
:param type: Type of task
:type type: TaskTypeEnum
:param comment: Free text comment
:type comment: str
:param parent: Parent task id. Must be a completed task.
:type parent: str
:param project: Project ID of the project to which this task is assigned. Must
exist.
:type project: str
:param output_dest: Output storage id. Must be a reference to an existing
storage.
:type output_dest: str
:param execution: Task execution params
:type execution: Execution
:param hyperparams: Task hyper params per section
:type hyperparams: dict
:param configuration: Task configuration params
:type configuration: dict
:param script: Script info
:type script: Script
:param models: Task models
:type models: TaskModels
:param container: Docker container parameters
:type container: dict
:param runtime: Task runtime mapping
:type runtime: dict
"""
_service = "tasks"
_action = "edit"
_version = "2.20"
_schema = {
"definitions": {
"artifact": {
"properties": {
"content_size": {
"description": "Raw data length in bytes",
"type": "integer",
},
"display_data": {
"description": "User-defined list of key/value pairs, sorted",
"items": {"items": {"type": "string"}, "type": "array"},
"type": "array",
},
"hash": {
"description": "Hash of entire raw data",
"type": "string",
},
"key": {"description": "Entry key", "type": "string"},
"mode": {
"$ref": "#/definitions/artifact_mode_enum",
"description": "System defined input/output indication",
},
"timestamp": {
"description": "Epoch time when artifact was created",
"type": "integer",
},
"type": {"description": "System defined type", "type": "string"},
"type_data": {
"$ref": "#/definitions/artifact_type_data",
"description": "Additional fields defined by the system",
},
"uri": {"description": "Raw data location", "type": "string"},
},
"required": ["key", "type"],
"type": "object",
},
"artifact_mode_enum": {
"default": "output",
"enum": ["input", "output"],
"type": "string",
},
"artifact_type_data": {
"properties": {
"content_type": {
"description": "System defined raw data content type",
"type": ["string", "null"],
},
"data_hash": {
"description": "Hash of raw data, without any headers or descriptive parts",
"type": ["string", "null"],
},
"preview": {
"description": "Description or textual data",
"type": ["string", "null"],
},
},
"type": "object",
},
"configuration_item": {
"properties": {
"description": {
"description": "The parameter description. Optional",
"type": ["string", "null"],
},
"name": {
"description": "Name of the parameter. Should be unique",
"type": ["string", "null"],
},
"type": {
"description": "Type of the parameter. Optional",
"type": ["string", "null"],
},
"value": {
"description": "Value of the parameter",
"type": ["string", "null"],
},
},
"type": "object",
},
"execution": {
"properties": {
"artifacts": {
"description": "Task artifacts",
"items": {"$ref": "#/definitions/artifact"},
"type": ["array", "null"],
},
"framework": {
"description": "Framework related to the task. Case insensitive. Mandatory for Training tasks. ",
"type": ["string", "null"],
},
"model_desc": {
"additionalProperties": True,
"description": "Json object representing the Model descriptors",
"type": ["object", "null"],
},
"model_labels": {
"additionalProperties": {"type": "integer"},
"description": "Json object representing the ids of the labels in the model.\n The keys are the layers' names and the values are the IDs.\n Not applicable for Register (Import) tasks.\n Mandatory for Training tasks",
"type": ["object", "null"],
},
"parameters": {
"additionalProperties": True,
"description": "Json object containing the Task parameters",
"type": ["object", "null"],
},
"queue": {
"description": "Queue ID where task was queued.",
"type": ["string", "null"],
},
},
"type": "object",
},
"params_item": {
"properties": {
"description": {
"description": "The parameter description. Optional",
"type": ["string", "null"],
},
"name": {
"description": "Name of the parameter. The combination of section and name should be unique",
"type": ["string", "null"],
},
"section": {
"description": "Section that the parameter belongs to",
"type": ["string", "null"],
},
"type": {
"description": "Type of the parameter. Optional",
"type": ["string", "null"],
},
"value": {
"description": "Value of the parameter",
"type": ["string", "null"],
},
},
"type": "object",
},
"script": {
"properties": {
"binary": {
"default": "python",
"description": "Binary to use when running the script",
"type": ["string", "null"],
},
"branch": {
"description": "Repository branch id If not provided and tag not provided, default repository branch is used.",
"type": ["string", "null"],
},
"diff": {
"description": "Uncommitted changes found in the repository when task was run",
"type": ["string", "null"],
},
"entry_point": {
"description": "Path to execute within the repository",
"type": ["string", "null"],
},
"repository": {
"description": "Name of the repository where the script is located",
"type": ["string", "null"],
},
"requirements": {
"description": "A JSON object containing requirements strings by key",
"type": ["object", "null"],
},
"tag": {
"description": "Repository tag",
"type": ["string", "null"],
},
"version_num": {
"description": "Version (changeset) number. Optional (default is head version) Unused if tag is provided.",
"type": ["string", "null"],
},
"working_dir": {
"description": "Path to the folder from which to run the script Default - root folder of repository",
"type": ["string", "null"],
},
},
"type": "object",
},
"section_params": {
"additionalProperties": {"$ref": "#/definitions/params_item"},
"description": "Task section params",
"type": "object",
},
"task_model_item": {
"properties": {
"model": {"description": "The model ID", "type": "string"},
"name": {"description": "The task model name", "type": "string"},
},
"required": ["name", "model"],
"type": "object",
},
"task_models": {
"properties": {
"input": {
"description": "The list of task input models",
"items": {"$ref": "#/definitions/task_model_item"},
"type": ["array", "null"],
},
"output": {
"description": "The list of task output models",
"items": {"$ref": "#/definitions/task_model_item"},
"type": ["array", "null"],
},
},
"type": "object",
},
"task_type_enum": {
"enum": [
"training",
"testing",
"inference",
"data_processing",
"application",
"monitor",
"controller",
"optimizer",
"service",
"qc",
"custom",
],
"type": "string",
},
},
"properties": {
"comment": {"description": "Free text comment ", "type": "string"},
"configuration": {
"additionalProperties": {"$ref": "#/definitions/configuration_item"},
"description": "Task configuration params",
"type": "object",
},
"container": {
"type": "object",
"description": "Docker container parameters",
"additionalProperties": {"type": ["string", "null"]},
},
"execution": {
"$ref": "#/definitions/execution",
"description": "Task execution params",
},
"force": {
"default": False,
"description": "If not true, call fails if the task status is not 'created'",
"type": "boolean",
},
"hyperparams": {
"additionalProperties": {"$ref": "#/definitions/section_params"},
"description": "Task hyper params per section",
"type": "object",
},
"models": {
"$ref": "#/definitions/task_models",
"description": "Task models",
},
"name": {
"description": "Task name Unique within the company.",
"type": "string",
},
"output_dest": {
"description": "Output storage id Must be a reference to an existing storage.",
"type": "string",
},
"parent": {
"description": "Parent task id Must be a completed task.",
"type": "string",
},
"project": {
"description": "Project ID of the project to which this task is assigned Must exist[ab]",
"type": "string",
},
"runtime": {
"description": "Task runtime mapping",
"type": ["object", "null"],
"additionalProperties": True,
},
"script": {"$ref": "#/definitions/script", "description": "Script info"},
"system_tags": {
"description": "System tags list. This field is reserved for system use, please don't use it.",
"items": {"type": "string"},
"type": "array",
},
"tags": {
"description": "User-defined tags list",
"items": {"type": "string"},
"type": "array",
},
"task": {"description": "ID of the task", "type": "string"},
"type": {
"$ref": "#/definitions/task_type_enum",
"description": "Type of task",
},
},
"required": ["task"],
"type": "object",
}
def __init__(
self,
task: str,
force: Optional[bool] = False,
name: Optional[str] = None,
tags: Optional[List[str]] = None,
system_tags: Optional[List[str]] = None,
type: Any = None,
comment: Optional[str] = None,
parent: Optional[str] = None,
project: Optional[str] = None,
output_dest: Optional[str] = None,
execution: Any = None,
hyperparams: Optional[dict] = None,
configuration: Optional[dict] = None,
script: Any = None,
models: Any = None,
container: Optional[dict] = None,
runtime: Optional[dict] = None,
**kwargs: Any
) -> None:
super(EditRequest, self).__init__(**kwargs)
self.task = task
self.force = force
self.name = name
self.tags = tags
self.system_tags = system_tags
self.type = type
self.comment = comment
self.parent = parent
self.project = project
self.output_dest = output_dest
self.execution = execution
self.hyperparams = hyperparams
self.configuration = configuration
self.script = script
self.models = models
self.container = container
self.runtime = runtime
@schema_property("task")
def task(self) -> str:
return self._property_task
@task.setter
def task(self, value: str) -> None:
if value is None:
self._property_task = None
return
self.assert_isinstance(value, "task", six.string_types)
self._property_task = value
@schema_property("force")
def force(self) -> Optional[bool]:
return self._property_force
@force.setter
def force(self, value: Optional[bool]) -> None:
if value is None:
self._property_force = None
return
self.assert_isinstance(value, "force", (bool,))
self._property_force = value
@schema_property("name")
def name(self) -> Optional[str]:
return self._property_name
@name.setter
def name(self, value: Optional[str]) -> None:
if value is None:
self._property_name = None
return
self.assert_isinstance(value, "name", six.string_types)
self._property_name = value
@schema_property("tags")
def tags(self) -> Optional[List[str]]:
return self._property_tags
@tags.setter
def tags(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_tags = None
return
self.assert_isinstance(value, "tags", (list, tuple))
self.assert_isinstance(value, "tags", six.string_types, is_array=True)
self._property_tags = value
@schema_property("system_tags")
def system_tags(self) -> Optional[List[str]]:
return self._property_system_tags
@system_tags.setter
def system_tags(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_system_tags = None
return
self.assert_isinstance(value, "system_tags", (list, tuple))
self.assert_isinstance(value, "system_tags", six.string_types, is_array=True)
self._property_system_tags = value
@schema_property("type")
def type(self) -> Any:
return self._property_type
@type.setter
def type(self, value: Any) -> None:
if value is None:
self._property_type = None
return
if isinstance(value, six.string_types):
try:
value = TaskTypeEnum(value)
except ValueError:
pass
else:
self.assert_isinstance(value, "type", enum.Enum)
self._property_type = value
@schema_property("comment")
def comment(self) -> Optional[str]:
return self._property_comment
@comment.setter
def comment(self, value: Optional[str]) -> None:
if value is None:
self._property_comment = None
return
self.assert_isinstance(value, "comment", six.string_types)
self._property_comment = value
@schema_property("parent")
def parent(self) -> Optional[str]:
return self._property_parent
@parent.setter
def parent(self, value: Optional[str]) -> None:
if value is None:
self._property_parent = None
return
self.assert_isinstance(value, "parent", six.string_types)
self._property_parent = value
@schema_property("project")
def project(self) -> Optional[str]:
return self._property_project
@project.setter
def project(self, value: Optional[str]) -> None:
if value is None:
self._property_project = None
return
self.assert_isinstance(value, "project", six.string_types)
self._property_project = value
@schema_property("output_dest")
def output_dest(self) -> Optional[str]:
return self._property_output_dest
@output_dest.setter
def output_dest(self, value: Optional[str]) -> None:
if value is None:
self._property_output_dest = None
return
self.assert_isinstance(value, "output_dest", six.string_types)
self._property_output_dest = value
@schema_property("execution")
def execution(self) -> Any:
return self._property_execution
@execution.setter
def execution(self, value: Any) -> None:
if value is None:
self._property_execution = None
return
if isinstance(value, dict):
value = Execution.from_dict(value)
else:
self.assert_isinstance(value, "execution", Execution)
self._property_execution = value
@schema_property("hyperparams")
def hyperparams(self) -> Optional[dict]:
return self._property_hyperparams
@hyperparams.setter
def hyperparams(self, value: Optional[dict]) -> None:
if value is None:
self._property_hyperparams = None
return
self.assert_isinstance(value, "hyperparams", dict)
self.assert_isinstance(value.keys(), "hyperparams_keys", six.string_types, is_array=True)
self.assert_isinstance(value.values(), "hyperparams_values", (SectionParams, dict), is_array=True)
value = dict(((k, SectionParams(**v) if isinstance(v, dict) else v) for (k, v) in value.items()))
self._property_hyperparams = value
@schema_property("configuration")
def configuration(self) -> Optional[dict]:
return self._property_configuration
@configuration.setter
def configuration(self, value: Optional[dict]) -> None:
if value is None:
self._property_configuration = None
return
self.assert_isinstance(value, "configuration", dict)
self.assert_isinstance(value.keys(), "configuration_keys", six.string_types, is_array=True)
self.assert_isinstance(
value.values(),
"configuration_values",
(ConfigurationItem, dict),
is_array=True,
)
value = dict(((k, ConfigurationItem(**v) if isinstance(v, dict) else v) for (k, v) in value.items()))
self._property_configuration = value
@schema_property("script")
def script(self) -> Any:
return self._property_script
@script.setter
def script(self, value: Any) -> None:
if value is None:
self._property_script = None
return
if isinstance(value, dict):
value = Script.from_dict(value)
else:
self.assert_isinstance(value, "script", Script)
self._property_script = value
@schema_property("models")
def models(self) -> Any:
return self._property_models
@models.setter
def models(self, value: Any) -> None:
if value is None:
self._property_models = None
return
if isinstance(value, dict):
value = TaskModels.from_dict(value)
else:
self.assert_isinstance(value, "models", TaskModels)
self._property_models = value
@schema_property("container")
def container(self) -> Optional[dict]:
return self._property_container
@container.setter
def container(self, value: Optional[dict]) -> None:
if value is None:
self._property_container = None
return
self.assert_isinstance(value, "container", (dict,))
self._property_container = value
@schema_property("runtime")
def runtime(self) -> Optional[dict]:
return self._property_runtime
@runtime.setter
def runtime(self, value: Optional[dict]) -> None:
if value is None:
self._property_runtime = None
return
self.assert_isinstance(value, "runtime", (dict,))
self._property_runtime = value
| EditRequest |
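The EditRequest record above pairs a JSON schema with validating `@schema_property` setters. Below is a minimal, self-contained sketch of that validation pattern; all names here are hypothetical stand-ins for the repo's generated API base class.

# Hypothetical analog of the schema-backed request pattern shown above.
class SchemaBacked:
    def assert_isinstance(self, value, name, types):
        # Mirrors the assert_isinstance helper used by the setters above.
        if not isinstance(value, types):
            raise TypeError(f"{name} must be {types}, got {type(value).__name__}")

class TinyEditRequest(SchemaBacked):
    def __init__(self, task):
        self.task = task  # routed through the validating property below

    @property
    def task(self):
        return self._property_task

    @task.setter
    def task(self, value):
        if value is None:
            self._property_task = None
            return
        self.assert_isinstance(value, "task", str)
        self._property_task = value

req = TinyEditRequest(task="abc123")  # accepted
# TinyEditRequest(task=42)            # would raise TypeError from the setter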
python | huggingface__transformers | src/transformers/models/sam_hq/modular_sam_hq.py | {
"start": 8375,
"end": 8434
} | class ____(SamPreTrainedModel):
pass
| SamHQPreTrainedModel |
python | doocs__leetcode | lcci/17.12.BiNode/Solution.py | {
"start": 164,
"end": 587
} | class ____:
def convertBiNode(self, root: TreeNode) -> TreeNode:
def dfs(root):
if root is None:
return
nonlocal prev
dfs(root.left)
prev.right = root
root.left = None
prev = root
dfs(root.right)
dummy = TreeNode(val=0, right=root)
prev = dummy
dfs(root)
return dummy.right
| Solution |
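A worked run of the Solution above (blank filled in as `Solution`); the `TreeNode` definition is written out here for self-containment, mirroring the one the judge normally supplies.

class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val, self.left, self.right = val, left, right

# BST:      4            flattened:  1 -> 2 -> 3 -> 4 -> 5
#          / \           (right pointers only, all lefts cleared)
#         2   5
#        / \
#       1   3
root = TreeNode(4, TreeNode(2, TreeNode(1), TreeNode(3)), TreeNode(5))
head = Solution().convertBiNode(root)
vals = []
while head:
    assert head.left is None  # every left pointer has been cleared
    vals.append(head.val)
    head = head.right
print(vals)  # [1, 2, 3, 4, 5] -- the in-order sequence as a right-leaning list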
python | wandb__wandb | wandb/sdk/mailbox/mailbox_handle.py | {
"start": 3231,
"end": 4072
} | class ____(Generic[_S], MailboxHandle[_S]):
"""A mailbox handle whose result is derived from another handle."""
def __init__(
self,
handle: MailboxHandle[_T],
fn: Callable[[_T], _S],
) -> None:
super().__init__(handle.asyncer)
self._handle = handle
self._fn = fn
@override
def abandon(self) -> None:
self._handle.abandon()
@override
def cancel(self, iface: interface.InterfaceBase) -> None:
self._handle.cancel(iface)
@override
def wait_or(self, *, timeout: float | None) -> _S:
return self._fn(self._handle.wait_or(timeout=timeout))
@override
async def wait_async(self, *, timeout: float | None) -> _S:
response = await self._handle.wait_async(timeout=timeout)
return self._fn(response)
| _MailboxMappedHandle |
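The handle above is essentially a functor map over another handle's result. A stripped-down analog of the idea (hypothetical classes; the real ones depend on wandb's mailbox and asyncer machinery):

from typing import Callable, Generic, TypeVar

_T = TypeVar("_T")
_S = TypeVar("_S")

class Handle(Generic[_T]):
    def __init__(self, result: _T) -> None:
        self._result = result

    def wait_or(self, *, timeout: float | None = None) -> _T:
        return self._result

class MappedHandle(Generic[_T, _S]):
    def __init__(self, handle: Handle[_T], fn: Callable[[_T], _S]) -> None:
        self._handle, self._fn = handle, fn

    def wait_or(self, *, timeout: float | None = None) -> _S:
        # Delegate to the wrapped handle, then post-process its result.
        return self._fn(self._handle.wait_or(timeout=timeout))

h = MappedHandle(Handle('{"status": "ok"}'), fn=len)
print(h.wait_or())  # 16 -- the mapped (length of the) underlying result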
python | anthropics__anthropic-sdk-python | src/anthropic/lib/foundry.py | {
"start": 1084,
"end": 1329
} | class ____(AnthropicError):
def __init__(self) -> None:
super().__init__(
"The `api_key` and `azure_ad_token_provider` arguments are mutually exclusive; Only one can be passed at a time"
)
| MutuallyExclusiveAuthError |
python | readthedocs__readthedocs.org | readthedocs/search/migrations/0005_alter_searchquery_id.py | {
"start": 149,
"end": 573
} | class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("search", "0004_make_total_results_not_null"),
]
operations = [
migrations.AlterField(
model_name="searchquery",
name="id",
field=models.BigAutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
]
| Migration |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/methodOverride6.py | {
"start": 4409,
"end": 4664
} | class ____(Parent4[int]):
@overload
def function(self: Parent4[int], a: None) -> float: ...
@overload
def function(self: Parent4[int], a: int) -> float: ...
def function(self, a: int | None = None) -> float:
return 0.0
| Child4_1 |
python | plotly__plotly.py | tests/test_core/test_graph_objs/test_constructor.py | {
"start": 77,
"end": 1504
} | class ____(TestCase):
def test_kwarg(self):
m = go.scatter.Marker(color="green")
self.assertEqual(m.to_plotly_json(), {"color": "green"})
def test_valid_arg_dict(self):
m = go.scatter.Marker(dict(color="green"))
self.assertEqual(m.to_plotly_json(), {"color": "green"})
def test_valid_underscore_kwarg(self):
m = go.scatter.Marker(line_color="green")
self.assertEqual(m.to_plotly_json(), {"line": {"color": "green"}})
def test_valid_arg_obj(self):
m = go.scatter.Marker(go.scatter.Marker(color="green"))
self.assertEqual(m.to_plotly_json(), {"color": "green"})
def test_kwarg_takes_precedence(self):
m = go.scatter.Marker(dict(color="green", size=12), color="blue", opacity=0.6)
self.assertEqual(
m.to_plotly_json(), {"color": "blue", "size": 12, "opacity": 0.6}
)
def test_invalid_kwarg(self):
with pytest.raises(ValueError):
go.scatter.Marker(bogus=[1, 2, 3])
def test_invalid_arg(self):
with pytest.raises(ValueError):
go.scatter.Marker([1, 2, 3])
def test_valid_arg_with_invalid_key_name(self):
with pytest.raises(ValueError):
go.scatter.Marker({"bogus": 12})
def test_valid_arg_with_invalid_key_value(self):
with pytest.raises(ValueError):
go.scatter.Marker({"color": "bogus"})
| TestGraphObjConstructor |
python | getsentry__sentry | src/sentry/identity/oauth2.py | {
"start": 2066,
"end": 7456
} | class ____(Provider):
"""
The OAuth2Provider is a generic way to implement an identity provider that
uses the OAuth 2.0 protocol as a means for authenticating a user.
OAuth scopes are configured through the oauth_scopes class property,
however, they may be overridden using the ``config['oauth_scopes']`` entry.
"""
oauth_access_token_url = ""
oauth_authorize_url = ""
refresh_token_url = ""
oauth_scopes: tuple[str, ...] = ()
def _get_oauth_parameter(self, parameter_name):
"""
Lookup an OAuth parameter for the provider. Depending on the context of the
pipeline using the provider, the parameter may come from 1 of 3 places:
1. Check the class property of the provider for the parameter.
2. If the provider has the parameters made available within the ``config``.
3. If provided, check the pipeline's ``provider_model`` for the oauth parameter
in the config field.
If the parameter cannot be found a KeyError will be raised.
"""
try:
prop = getattr(self, f"oauth_{parameter_name}")
if prop != "":
return prop
except AttributeError:
pass
if self.config.get(parameter_name):
return self.config.get(parameter_name)
model = self.pipeline.provider_model
if model and model.config.get(parameter_name) is not None:
return model.config.get(parameter_name)
raise KeyError(f'Unable to resolve OAuth parameter "{parameter_name}"')
def get_oauth_access_token_url(self):
return self._get_oauth_parameter("access_token_url")
def get_oauth_refresh_token_url(self):
raise NotImplementedError
def get_oauth_authorize_url(self):
return self._get_oauth_parameter("authorize_url")
def get_oauth_client_id(self):
return self._get_oauth_parameter("client_id")
def get_oauth_client_secret(self):
return self._get_oauth_parameter("client_secret")
def get_oauth_scopes(self):
return self.config.get("oauth_scopes", self.oauth_scopes)
def get_refresh_token_headers(self):
return None
def get_pipeline_views(self) -> list[PipelineView[IdentityPipeline]]:
return [
OAuth2LoginView(
authorize_url=self.get_oauth_authorize_url(),
client_id=self.get_oauth_client_id(),
scope=" ".join(self.get_oauth_scopes()),
),
OAuth2CallbackView(
access_token_url=self.get_oauth_access_token_url(),
client_id=self.get_oauth_client_id(),
client_secret=self.get_oauth_client_secret(),
),
]
def get_refresh_token_params(
self, refresh_token: str, identity: Identity | RpcIdentity, **kwargs: Any
) -> dict[str, str | None]:
raise NotImplementedError
def get_refresh_token_url(self) -> str:
raise NotImplementedError
def get_oauth_data(self, payload):
data = {"access_token": payload["access_token"]}
if "expires_in" in payload:
data["expires"] = int(time()) + int(payload["expires_in"])
if "refresh_token" in payload:
data["refresh_token"] = payload["refresh_token"]
if "token_type" in payload:
data["token_type"] = payload["token_type"]
return data
def get_refresh_token(
self, refresh_token, url: str, identity: Identity | RpcIdentity, **kwargs: Any
) -> Response:
data = self.get_refresh_token_params(refresh_token, identity, **kwargs)
try:
req = safe_urlopen(
url=url,
headers=self.get_refresh_token_headers(),
data=data,
verify_ssl=kwargs.get("verify_ssl", True),
)
req.raise_for_status()
except HTTPError as e:
error_resp = e.response
exc = ApiError.from_response(error_resp, url=url)
if isinstance(exc, ApiUnauthorized) or isinstance(exc, ApiInvalidRequestError):
raise IdentityNotValid from e
raise exc from e
return req
def refresh_identity(self, identity: Identity | RpcIdentity, **kwargs: Any) -> None:
refresh_token = identity.data.get("refresh_token")
if not refresh_token:
raise IdentityNotValid("Missing refresh token")
req = self.get_refresh_token(
refresh_token=refresh_token,
url=self.get_refresh_token_url(),
identity=identity,
**kwargs,
)
try:
body = safe_urlread(req)
payload = orjson.loads(body)
except orjson.JSONDecodeError:
payload = {}
identity.data.update(self.get_oauth_data(payload))
identity_service.update_data(identity_id=identity.id, data=identity.data)
def record_event(event: IntegrationPipelineViewType, provider: str):
from sentry.identity import default_manager as identity_manager
try:
identity_manager.get(provider)
except NotRegistered:
logger.exception("oauth2.record_event.invalid_provider", extra={"provider": provider})
return IntegrationPipelineViewEvent(
event, domain=IntegrationDomain.IDENTITY, provider_key=provider
)
| OAuth2Provider |
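A concrete provider usually just pins the class-level OAuth parameters that `_get_oauth_parameter` resolves first. A hedged sketch of such a subclass (the endpoint URLs and credentials are placeholders, not a real identity provider; instantiation is handled by the identity manager):

class ExampleIdentityProvider(OAuth2Provider):
    key = "example"
    name = "Example"
    oauth_access_token_url = "https://idp.example.com/oauth/token"
    oauth_authorize_url = "https://idp.example.com/oauth/authorize"
    oauth_scopes = ("read:user",)

    def get_oauth_client_id(self):
        return "my-client-id"  # normally resolved from config/provider_model

    def get_oauth_client_secret(self):
        return "my-client-secret"  # normally resolved from config/provider_model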
python | hynek__structlog | src/structlog/_base.py | {
"start": 620,
"end": 7364
} | class ____:
"""
Immutable context carrier.
Doesn't do any actual logging; examples of useful subclasses are:
- the generic `BoundLogger` that can wrap anything,
- `structlog.stdlib.BoundLogger`.
- `structlog.twisted.BoundLogger`,
See also `custom-wrappers`.
"""
_logger: WrappedLogger
"""
Wrapped logger.
.. note::
Despite the leading underscore, this attribute is available (**read-only**) to custom wrapper classes.
See also `custom-wrappers`.
"""
def __init__(
self,
logger: WrappedLogger,
processors: Iterable[Processor],
context: Context,
):
self._logger = logger
self._processors = processors
self._context = context
def __repr__(self) -> str:
return f"<{self.__class__.__name__}(context={self._context!r}, processors={self._processors!r})>"
def __eq__(self, other: object) -> bool:
try:
return self._context == other._context # type: ignore[attr-defined]
except AttributeError:
return False
def __ne__(self, other: object) -> bool:
return not self.__eq__(other)
def bind(self, **new_values: Any) -> Self:
"""
Return a new logger with *new_values* added to the existing ones.
"""
return self.__class__(
self._logger,
self._processors,
self._context.__class__(self._context, **new_values),
)
def unbind(self, *keys: str) -> Self:
"""
Return a new logger with *keys* removed from the context.
Raises:
KeyError: If the key is not part of the context.
"""
bl = self.bind()
for key in keys:
del bl._context[key]
return bl
def try_unbind(self, *keys: str) -> Self:
"""
Like :meth:`unbind`, but best effort: missing keys are ignored.
.. versionadded:: 18.2.0
"""
bl = self.bind()
for key in keys:
bl._context.pop(key, None)
return bl
def new(self, **new_values: Any) -> Self:
"""
Clear the context and bind *new_values* using `bind`.
Only necessary with dict implementations that keep global state like
those wrapped by `structlog.threadlocal.wrap_dict` when threads
are reused.
"""
self._context.clear()
return self.bind(**new_values)
# Helper methods for sub-classing concrete BoundLoggers.
def _process_event(
self, method_name: str, event: str | None, event_kw: dict[str, Any]
) -> tuple[Sequence[Any], Mapping[str, Any]]:
"""
Creates an ``event_dict`` and runs the processor chain.
Call it to combine your *event* and *context* into an event_dict and
process using the processor chain.
Args:
method_name:
The name of the logger method. Is passed into the processors.
event:
The event -- usually the first positional argument to a logger.
event_kw:
Additional event keywords. For example if someone calls
``log.info("foo", bar=42)``, *event* would to be ``"foo"`` and
*event_kw* ``{"bar": 42}``.
Raises:
structlog.DropEvent: if log entry should be dropped.
ValueError:
if the final processor doesn't return a str, bytes, bytearray,
tuple, or a dict.
Returns:
`tuple` of ``(*args, **kw)``
.. note::
Despite the leading underscore, this method is available to custom wrapper classes.
See also `custom-wrappers`.
.. versionchanged:: 14.0.0
Allow final processor to return a `dict`.
.. versionchanged:: 20.2.0
Allow final processor to return `bytes`.
.. versionchanged:: 21.2.0
Allow final processor to return a `bytearray`.
"""
# We're typing it as Any, because processors can return more than an
# EventDict.
event_dict: Any = self._context.copy()
event_dict.update(**event_kw)
if event is not None:
event_dict["event"] = event
for proc in self._processors:
event_dict = proc(self._logger, method_name, event_dict)
if isinstance(event_dict, (str, bytes, bytearray)):
return (event_dict,), {}
if isinstance(event_dict, tuple):
# In this case we assume that the last processor returned a tuple
# of ``(args, kwargs)`` and pass it right through.
return event_dict
if isinstance(event_dict, dict):
return (), event_dict
msg = (
"Last processor didn't return an appropriate value. "
"Valid return values are a dict, a tuple of (args, kwargs), bytes, or a str."
)
raise ValueError(msg)
def _proxy_to_logger(
self, method_name: str, event: str | None = None, **event_kw: Any
) -> Any:
"""
Run processor chain on event & call *method_name* on wrapped logger.
DRY convenience method that runs :func:`_process_event`, takes care of
handling :exc:`structlog.DropEvent`, and finally calls *method_name* on
:attr:`_logger` with the result.
Args:
method_name:
The name of the method that's going to get called. Technically
it should be identical to the method the user called because it
also gets passed into the processors.
event:
The event -- usually the first positional argument to a logger.
event_kw:
Additional event keywords. For example if someone calls
``log.info("foo", bar=42)``, *event* would to be ``"foo"`` and
*event_kw* ``{"bar": 42}``.
.. note::
Despite the leading underscore, this method is available to custom wrapper classes.
See also `custom-wrappers`.
"""
try:
args, kw = self._process_event(method_name, event, event_kw)
return getattr(self._logger, method_name)(*args, **kw)
except DropEvent:
return None
def get_context(bound_logger: BindableLogger) -> Context:
"""
Return *bound_logger*'s context.
The type of *bound_logger* and the type returned depend on your
configuration.
Args:
bound_logger: The bound logger whose context you want.
Returns:
The *actual* context from *bound_logger*. It is *not* copied first.
.. versionadded:: 20.2.0
"""
# This probably will get more complicated in the future.
return bound_logger._context
| BoundLoggerBase |
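In day-to-day use the context-carrier methods above are reached through structlog's public API; a short usage sketch (assuming structlog is installed and configured with its defaults):

import structlog

log = structlog.get_logger()
log = log.bind(request_id="r-1", user="alice")  # returns a new bound logger
log.info("login")                # event dict carries request_id and user
log = log.try_unbind("user")     # best effort: missing keys are ignored
log.info("logout")               # only request_id remains bound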
python | celery__celery | t/unit/backends/test_mongodb.py | {
"start": 2012,
"end": 25876
} | class ____:
default_url = 'mongodb://uuuu:pwpw@hostname.dom/database'
replica_set_url = (
'mongodb://uuuu:pwpw@hostname.dom,'
'hostname.dom/database?replicaSet=rs'
)
sanitized_default_url = 'mongodb://uuuu:**@hostname.dom/database'
sanitized_replica_set_url = (
'mongodb://uuuu:**@hostname.dom/,'
'hostname.dom/database?replicaSet=rs'
)
def setup_method(self):
self.patching('celery.backends.mongodb.MongoBackend.encode')
self.patching('celery.backends.mongodb.MongoBackend.decode')
self.patching('celery.backends.mongodb.Binary')
self.backend = MongoBackend(app=self.app, url=self.default_url)
def test_init_no_mongodb(self, patching):
patching('celery.backends.mongodb.pymongo', None)
with pytest.raises(ImproperlyConfigured):
MongoBackend(app=self.app)
def test_init_no_settings(self):
self.app.conf.mongodb_backend_settings = []
with pytest.raises(ImproperlyConfigured):
MongoBackend(app=self.app)
def test_init_settings_is_None(self):
self.app.conf.mongodb_backend_settings = None
MongoBackend(app=self.app)
def test_init_with_settings(self):
self.app.conf.mongodb_backend_settings = None
# empty settings
mb = MongoBackend(app=self.app)
# uri
uri = 'mongodb://localhost:27017'
mb = MongoBackend(app=self.app, url=uri)
assert mb.mongo_host == ['localhost:27017']
assert mb.options == mb._prepare_client_options()
assert mb.database_name == 'celery'
# uri with database name
uri = 'mongodb://localhost:27017/celerydb'
mb = MongoBackend(app=self.app, url=uri)
assert mb.database_name == 'celerydb'
# uri with user, password, database name, replica set
uri = ('mongodb://'
'celeryuser:celerypassword@'
'mongo1.example.com:27017,'
'mongo2.example.com:27017,'
'mongo3.example.com:27017/'
'celerydatabase?replicaSet=rs0')
mb = MongoBackend(app=self.app, url=uri)
assert mb.mongo_host == MONGODB_BACKEND_HOST
if 'replicaSet' in mb.options: # pragma: no cover # pymongo >= 4.14
replicaset_option = 'replicaSet'
else: # pragma: no cover # pymongo < 4.14
replicaset_option = 'replicaset'
assert mb.options == {
**mb._prepare_client_options(),
replicaset_option: 'rs0',
}
assert mb.user == CELERY_USER
assert mb.password == CELERY_PASSWORD
assert mb.database_name == CELERY_DATABASE
# same uri, change some parameters in backend settings
self.app.conf.mongodb_backend_settings = {
replicaset_option: 'rs1',
'user': 'backenduser',
'database': 'another_db',
'options': {
'socketKeepAlive': True,
},
}
mb = MongoBackend(app=self.app, url=uri)
assert mb.mongo_host == MONGODB_BACKEND_HOST
assert mb.options == {
**mb._prepare_client_options(),
replicaset_option: 'rs1',
'socketKeepAlive': True,
}
assert mb.user == 'backenduser'
assert mb.password == CELERY_PASSWORD
assert mb.database_name == 'another_db'
mb = MongoBackend(app=self.app, url='mongodb://')
@pytest.mark.skipif(dns.version.MAJOR > 1,
reason="For dnspython version > 1, pymongo's"
"srv_resolver calls resolver.resolve")
@pytest.mark.skipif(pymongo.version_tuple[0] > 3,
reason="For pymongo version > 3, options returns ssl")
def test_init_mongodb_dnspython1_pymongo3_seedlist(self):
resolver = fake_resolver_dnspython()
self.app.conf.mongodb_backend_settings = None
with patch('dns.resolver.query', side_effect=resolver):
mb = self.perform_seedlist_assertions()
assert mb.options == dict(
mb._prepare_client_options(),
replicaset='rs0',
ssl=True
)
@pytest.mark.skipif(dns.version.MAJOR <= 1,
reason="For dnspython versions 1.X, pymongo's"
"srv_resolver calls resolver.query")
@pytest.mark.skipif(pymongo.version_tuple[0] > 3,
reason="For pymongo version > 3, options returns ssl")
def test_init_mongodb_dnspython2_pymongo3_seedlist(self):
resolver = fake_resolver_dnspython()
self.app.conf.mongodb_backend_settings = None
with patch('dns.resolver.resolve', side_effect=resolver):
mb = self.perform_seedlist_assertions()
assert mb.options == dict(
mb._prepare_client_options(),
replicaset='rs0',
ssl=True
)
@pytest.mark.skipif(dns.version.MAJOR > 1,
reason="For dnspython version >= 2, pymongo's"
"srv_resolver calls resolver.resolve")
@pytest.mark.skipif(pymongo.version_tuple[0] <= 3,
reason="For pymongo version > 3, options returns tls")
def test_init_mongodb_dnspython1_pymongo4_seedlist(self):
resolver = fake_resolver_dnspython()
self.app.conf.mongodb_backend_settings = None
with patch('dns.resolver.query', side_effect=resolver):
mb = self.perform_seedlist_assertions()
assert mb.options == dict(
mb._prepare_client_options(),
replicaset='rs0',
tls=True
)
@pytest.mark.skipif(dns.version.MAJOR <= 1,
reason="For dnspython versions 1.X, pymongo's"
"srv_resolver calls resolver.query")
@pytest.mark.skipif(pymongo.version_tuple[0] <= 3,
reason="For pymongo version > 3, options returns tls")
def test_init_mongodb_dnspython2_pymongo4_seedlist(self):
resolver = fake_resolver_dnspython()
self.app.conf.mongodb_backend_settings = None
with patch('dns.resolver.resolve', side_effect=resolver):
mb = self.perform_seedlist_assertions()
if 'replicaSet' in mb.options: # pragma: no cover # pymongo >= 4.14
replicaset_option = 'replicaSet'
else: # pragma: no cover # pymongo < 4.14
replicaset_option = 'replicaset'
assert mb.options == {
**mb._prepare_client_options(),
replicaset_option: 'rs0',
'tls': True,
}
def perform_seedlist_assertions(self):
mb = MongoBackend(app=self.app, url=MONGODB_SEEDLIST_URI)
assert mb.mongo_host == MONGODB_BACKEND_HOST
assert mb.user == CELERY_USER
assert mb.password == CELERY_PASSWORD
assert mb.database_name == CELERY_DATABASE
return mb
def test_ensure_mongodb_uri_compliance(self):
mb = MongoBackend(app=self.app, url=None)
compliant_uri = mb._ensure_mongodb_uri_compliance
assert compliant_uri('mongodb://') == 'mongodb://localhost'
assert compliant_uri('mongodb+something://host') == \
'mongodb+something://host'
assert compliant_uri('something://host') == 'mongodb+something://host'
@pytest.mark.usefixtures('depends_on_current_app')
def test_reduce(self):
x = MongoBackend(app=self.app)
assert loads(dumps(x))
def test_get_connection_connection_exists(self):
with patch('pymongo.MongoClient') as mock_Connection:
self.backend._connection = sentinel._connection
connection = self.backend._get_connection()
assert sentinel._connection == connection
mock_Connection.assert_not_called()
def test_get_connection_no_connection_host(self):
with patch('pymongo.MongoClient') as mock_Connection:
self.backend._connection = None
self.backend.host = MONGODB_HOST
self.backend.port = MONGODB_PORT
mock_Connection.return_value = sentinel.connection
connection = self.backend._get_connection()
mock_Connection.assert_called_once_with(
host='mongodb://localhost:27017',
**self.backend._prepare_client_options()
)
assert sentinel.connection == connection
def test_get_connection_no_connection_mongodb_uri(self):
with patch('pymongo.MongoClient') as mock_Connection:
mongodb_uri = 'mongodb://%s:%d' % (MONGODB_HOST, MONGODB_PORT)
self.backend._connection = None
self.backend.host = mongodb_uri
mock_Connection.return_value = sentinel.connection
connection = self.backend._get_connection()
mock_Connection.assert_called_once_with(
host=mongodb_uri, **self.backend._prepare_client_options()
)
assert sentinel.connection == connection
def test_get_connection_with_authmechanism(self):
with patch('pymongo.MongoClient') as mock_Connection:
self.app.conf.mongodb_backend_settings = None
uri = ('mongodb://'
'celeryuser:celerypassword@'
'localhost:27017/'
'celerydatabase?authMechanism=SCRAM-SHA-256')
mb = MongoBackend(app=self.app, url=uri)
mock_Connection.return_value = sentinel.connection
connection = mb._get_connection()
if 'authMechanism' in mb.options: # pragma: no cover # pymongo >= 4.14
authmechanism_option = 'authMechanism'
else: # pragma: no cover # pymongo < 4.14
authmechanism_option = 'authmechanism'
mock_Connection.assert_called_once_with(
host=['localhost:27017'],
username=CELERY_USER,
password=CELERY_PASSWORD,
**{**mb._prepare_client_options(), authmechanism_option: 'SCRAM-SHA-256'}
)
assert sentinel.connection == connection
def test_get_connection_with_authmechanism_no_username(self):
with patch('pymongo.MongoClient') as mock_Connection:
self.app.conf.mongodb_backend_settings = None
uri = ('mongodb://'
'localhost:27017/'
'celerydatabase?authMechanism=SCRAM-SHA-256')
mb = MongoBackend(app=self.app, url=uri)
mock_Connection.side_effect = ConfigurationError(
'SCRAM-SHA-256 requires a username.')
with pytest.raises(ConfigurationError):
mb._get_connection()
if 'authMechanism' in mb.options: # pragma: no cover # pymongo >= 4.14
authmechanism_option = 'authMechanism'
else: # pragma: no cover # pymongo < 4.14
authmechanism_option = 'authmechanism'
mock_Connection.assert_called_once_with(
host=['localhost:27017'],
**{**mb._prepare_client_options(), authmechanism_option: 'SCRAM-SHA-256'}
)
@patch('celery.backends.mongodb.MongoBackend._get_connection')
def test_get_database_no_existing(self, mock_get_connection):
# Should really check for combinations of these two, to be complete.
self.backend.user = MONGODB_USER
self.backend.password = MONGODB_PASSWORD
mock_database = Mock()
mock_connection = MagicMock(spec=['__getitem__'])
mock_connection.__getitem__.return_value = mock_database
mock_get_connection.return_value = mock_connection
database = self.backend.database
assert database is mock_database
assert self.backend.__dict__['database'] is mock_database
@patch('celery.backends.mongodb.MongoBackend._get_connection')
def test_get_database_no_existing_no_auth(self, mock_get_connection):
# Should really check for combinations of these two, to be complete.
self.backend.user = None
self.backend.password = None
mock_database = Mock()
mock_connection = MagicMock(spec=['__getitem__'])
mock_connection.__getitem__.return_value = mock_database
mock_get_connection.return_value = mock_connection
database = self.backend.database
assert database is mock_database
assert self.backend.__dict__['database'] is mock_database
@patch('celery.backends.mongodb.MongoBackend._get_database')
def test_store_result(self, mock_get_database):
self.backend.taskmeta_collection = MONGODB_COLLECTION
mock_database = MagicMock(spec=['__getitem__', '__setitem__'])
mock_collection = Mock()
mock_get_database.return_value = mock_database
mock_database.__getitem__.return_value = mock_collection
ret_val = self.backend._store_result(
sentinel.task_id, sentinel.result, sentinel.status)
mock_get_database.assert_called_once_with()
mock_database.__getitem__.assert_called_once_with(MONGODB_COLLECTION)
mock_collection.replace_one.assert_called_once_with(ANY, ANY,
upsert=True)
assert sentinel.result == ret_val
mock_collection.replace_one.side_effect = InvalidDocument()
with pytest.raises(EncodeError):
self.backend._store_result(
sentinel.task_id, sentinel.result, sentinel.status)
@patch('celery.backends.mongodb.MongoBackend._get_database')
def test_store_result_with_request(self, mock_get_database):
self.backend.taskmeta_collection = MONGODB_COLLECTION
mock_database = MagicMock(spec=['__getitem__', '__setitem__'])
mock_collection = Mock()
mock_request = MagicMock(spec=['parent_id'])
mock_get_database.return_value = mock_database
mock_database.__getitem__.return_value = mock_collection
mock_request.parent_id = sentinel.parent_id
ret_val = self.backend._store_result(
sentinel.task_id, sentinel.result, sentinel.status,
request=mock_request)
mock_get_database.assert_called_once_with()
mock_database.__getitem__.assert_called_once_with(MONGODB_COLLECTION)
parameters = mock_collection.replace_one.call_args[0][1]
assert parameters['parent_id'] == sentinel.parent_id
assert sentinel.result == ret_val
mock_collection.replace_one.side_effect = InvalidDocument()
with pytest.raises(EncodeError):
self.backend._store_result(
sentinel.task_id, sentinel.result, sentinel.status)
@patch('celery.backends.mongodb.MongoBackend._get_database')
def test_get_task_meta_for(self, mock_get_database):
self.backend.taskmeta_collection = MONGODB_COLLECTION
mock_database = MagicMock(spec=['__getitem__', '__setitem__'])
mock_collection = Mock()
mock_collection.find_one.return_value = MagicMock()
mock_get_database.return_value = mock_database
mock_database.__getitem__.return_value = mock_collection
ret_val = self.backend._get_task_meta_for(sentinel.task_id)
mock_get_database.assert_called_once_with()
mock_database.__getitem__.assert_called_once_with(MONGODB_COLLECTION)
assert list(sorted([
'status', 'task_id', 'date_done',
'traceback', 'result', 'children',
])) == list(sorted(ret_val.keys()))
@patch('celery.backends.mongodb.MongoBackend._get_database')
def test_get_task_meta_for_result_extended(self, mock_get_database):
self.backend.taskmeta_collection = MONGODB_COLLECTION
mock_database = MagicMock(spec=['__getitem__', '__setitem__'])
mock_collection = Mock()
mock_collection.find_one.return_value = MagicMock()
mock_get_database.return_value = mock_database
mock_database.__getitem__.return_value = mock_collection
self.app.conf.result_extended = True
ret_val = self.backend._get_task_meta_for(sentinel.task_id)
mock_get_database.assert_called_once_with()
mock_database.__getitem__.assert_called_once_with(MONGODB_COLLECTION)
assert list(sorted([
'status', 'task_id', 'date_done',
'traceback', 'result', 'children',
'name', 'args', 'queue', 'kwargs', 'worker', 'retries',
])) == list(sorted(ret_val.keys()))
@patch('celery.backends.mongodb.MongoBackend._get_database')
def test_get_task_meta_for_no_result(self, mock_get_database):
self.backend.taskmeta_collection = MONGODB_COLLECTION
mock_database = MagicMock(spec=['__getitem__', '__setitem__'])
mock_collection = Mock()
mock_collection.find_one.return_value = None
mock_get_database.return_value = mock_database
mock_database.__getitem__.return_value = mock_collection
ret_val = self.backend._get_task_meta_for(sentinel.task_id)
mock_get_database.assert_called_once_with()
mock_database.__getitem__.assert_called_once_with(MONGODB_COLLECTION)
assert {'status': states.PENDING, 'result': None} == ret_val
@patch('celery.backends.mongodb.MongoBackend._get_database')
def test_save_group(self, mock_get_database):
self.backend.groupmeta_collection = MONGODB_GROUP_COLLECTION
mock_database = MagicMock(spec=['__getitem__', '__setitem__'])
mock_collection = Mock()
mock_get_database.return_value = mock_database
mock_database.__getitem__.return_value = mock_collection
res = [self.app.AsyncResult(i) for i in range(3)]
ret_val = self.backend._save_group(
sentinel.taskset_id, res,
)
mock_get_database.assert_called_once_with()
mock_database.__getitem__.assert_called_once_with(
MONGODB_GROUP_COLLECTION,
)
mock_collection.replace_one.assert_called_once_with(ANY, ANY,
upsert=True)
assert res == ret_val
@patch('celery.backends.mongodb.MongoBackend._get_database')
def test_restore_group(self, mock_get_database):
self.backend.groupmeta_collection = MONGODB_GROUP_COLLECTION
mock_database = MagicMock(spec=['__getitem__', '__setitem__'])
mock_collection = Mock()
mock_collection.find_one.return_value = {
'_id': sentinel.taskset_id,
'result': [uuid(), uuid()],
'date_done': 1,
}
self.backend.decode.side_effect = lambda r: r
mock_get_database.return_value = mock_database
mock_database.__getitem__.return_value = mock_collection
ret_val = self.backend._restore_group(sentinel.taskset_id)
mock_get_database.assert_called_once_with()
mock_collection.find_one.assert_called_once_with(
{'_id': sentinel.taskset_id})
assert (sorted(['date_done', 'result', 'task_id']) ==
sorted(list(ret_val.keys())))
mock_collection.find_one.return_value = None
self.backend._restore_group(sentinel.taskset_id)
@patch('celery.backends.mongodb.MongoBackend._get_database')
def test_delete_group(self, mock_get_database):
self.backend.taskmeta_collection = MONGODB_COLLECTION
mock_database = MagicMock(spec=['__getitem__', '__setitem__'])
mock_collection = Mock()
mock_get_database.return_value = mock_database
mock_database.__getitem__.return_value = mock_collection
self.backend._delete_group(sentinel.taskset_id)
mock_get_database.assert_called_once_with()
mock_collection.delete_one.assert_called_once_with(
{'_id': sentinel.taskset_id})
@patch('celery.backends.mongodb.MongoBackend._get_database')
def test__forget(self, mock_get_database):
# note: here tested _forget method, not forget method
self.backend.taskmeta_collection = MONGODB_COLLECTION
mock_database = MagicMock(spec=['__getitem__', '__setitem__'])
mock_collection = Mock()
mock_get_database.return_value = mock_database
mock_database.__getitem__.return_value = mock_collection
self.backend._forget(sentinel.task_id)
mock_get_database.assert_called_once_with()
mock_database.__getitem__.assert_called_once_with(
MONGODB_COLLECTION)
mock_collection.delete_one.assert_called_once_with(
{'_id': sentinel.task_id})
@patch('celery.backends.mongodb.MongoBackend._get_database')
def test_cleanup(self, mock_get_database):
self.backend.taskmeta_collection = MONGODB_COLLECTION
self.backend.groupmeta_collection = MONGODB_GROUP_COLLECTION
mock_database = Mock(spec=['__getitem__', '__setitem__'],
name='MD')
self.backend.collections = mock_collection = Mock()
mock_get_database.return_value = mock_database
mock_database.__getitem__ = Mock(name='MD.__getitem__')
mock_database.__getitem__.return_value = mock_collection
def now_func():
return datetime.datetime.now(datetime.timezone.utc)
self.backend.app.now = now_func
self.backend.cleanup()
mock_get_database.assert_called_once_with()
mock_collection.delete_many.assert_called()
self.backend.collections = mock_collection = Mock()
self.backend.expires = None
self.backend.cleanup()
mock_collection.delete_many.assert_not_called()
def test_prepare_client_options(self):
with patch('pymongo.version_tuple', new=(3, 0, 3)):
options = self.backend._prepare_client_options()
assert options == {
'maxPoolSize': self.backend.max_pool_size
}
def test_as_uri_include_password(self):
assert self.backend.as_uri(True) == self.default_url
def test_as_uri_exclude_password(self):
assert self.backend.as_uri() == self.sanitized_default_url
def test_as_uri_include_password_replica_set(self):
backend = MongoBackend(app=self.app, url=self.replica_set_url)
assert backend.as_uri(True) == self.replica_set_url
def test_as_uri_exclude_password_replica_set(self):
backend = MongoBackend(app=self.app, url=self.replica_set_url)
assert backend.as_uri() == self.sanitized_replica_set_url
def test_regression_worker_startup_info(self):
self.app.conf.result_backend = (
'mongodb://user:password@host0.com:43437,host1.com:43437'
'/work4us?replicaSet=rs&ssl=true'
)
worker = self.app.Worker()
with conftest.stdouts():
worker.on_start()
assert worker.startup_info()
@pytest.fixture(scope="function")
def mongo_backend_factory(app):
"""Return a factory that creates MongoBackend instance with given serializer, including BSON."""
def create_mongo_backend(serializer):
# NOTE: `bson` is a mongodb-specific serializer and can be set only directly on the MongoBackend instance.
if serializer == "bson":
backend = MongoBackend(app=app)
backend.serializer = serializer
else:
app.conf.accept_content = ['json', 'pickle', 'msgpack', 'yaml']
app.conf.result_serializer = serializer
backend = MongoBackend(app=app)
return backend
yield create_mongo_backend
@pytest.mark.parametrize("serializer,encoded_into", [
('bson', int),
('json', str),
('pickle', Binary),
('msgpack', Binary),
('yaml', str),
])
| test_MongoBackend |
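Outside the test suite, the backend under test is normally selected through a `result_backend` URL on the app. A hedged configuration sketch (hosts and credentials are placeholders):

from celery import Celery

app = Celery("tasks", broker="redis://localhost:6379/0")
app.conf.result_backend = (
    "mongodb://celeryuser:celerypassword@mongo1.example.com:27017/"
    "celerydatabase?replicaSet=rs0"
)
# Optional overrides merged on top of the URL, as exercised in the tests above.
app.conf.mongodb_backend_settings = {"taskmeta_collection": "my_taskmeta"}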
python | scipy__scipy | scipy/stats/_covariance.py | {
"start": 22373,
"end": 22913
} | class ____(Covariance):
"""
Representation of a covariance provided via an instance of _PSD
"""
__class_getitem__ = None
def __init__(self, psd):
self._LP = psd.U
self._log_pdet = psd.log_pdet
self._rank = psd.rank
self._covariance = psd._M
self._shape = psd._M.shape
self._psd = psd
self._allow_singular = False # by default
def _whiten(self, x):
return x @ self._LP
def _support_mask(self, x):
return self._psd._support_mask(x)
| CovViaPSD |
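Whitening maps x onto coordinates in which the covariance is the identity, so squared norms become Mahalanobis distances. A small sketch through the public `Covariance` factory methods (SciPy >= 1.10):

import numpy as np
from scipy import stats

# For a diagonal covariance, whitening just rescales each coordinate
# by 1/sqrt(variance).
cov = stats.Covariance.from_diagonal([4.0, 9.0])
x = np.array([2.0, 3.0])
print(cov.whiten(x))   # [1. 1.]
print(cov.log_pdet)    # log(4 * 9) ~= 3.5835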
python | scipy__scipy | scipy/stats/_multivariate.py | {
"start": 214427,
"end": 218966
} | class ____(multi_rv_generic):
r"""A vector-valued uniform direction.
Return a random direction (unit vector). The `dim` keyword specifies
the dimensionality of the space.
Methods
-------
rvs(dim=None, size=1, random_state=None)
Draw random directions.
Parameters
----------
dim : scalar
Dimension of directions.
seed : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
Used for drawing random variates.
If `seed` is `None`, the `~np.random.RandomState` singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used, seeded
with seed.
If `seed` is already a ``RandomState`` or ``Generator`` instance,
then that object is used.
Default is `None`.
Notes
-----
This distribution generates unit vectors uniformly distributed on
the surface of a hypersphere. These can be interpreted as random
directions.
For example, if `dim` is 3, 3D vectors from the surface of :math:`S^2`
will be sampled.
References
----------
.. [1] Marsaglia, G. (1972). "Choosing a Point from the Surface of a
Sphere". Annals of Mathematical Statistics. 43 (2): 645-646.
Examples
--------
>>> import numpy as np
>>> from scipy.stats import uniform_direction
>>> x = uniform_direction.rvs(3)
>>> np.linalg.norm(x)
1.
This generates one random direction, a vector on the surface of
:math:`S^2`.
Alternatively, the object may be called (as a function) to return a frozen
distribution with fixed `dim` parameter. Here,
we create a `uniform_direction` with ``dim=3`` and draw 5 observations.
The samples are then arranged in an array of shape 5x3.
>>> rng = np.random.default_rng()
>>> uniform_sphere_dist = uniform_direction(3)
>>> unit_vectors = uniform_sphere_dist.rvs(5, random_state=rng)
>>> unit_vectors
array([[ 0.56688642, -0.1332634 , -0.81294566],
[-0.427126 , -0.74779278, 0.50830044],
[ 0.3793989 , 0.92346629, 0.05715323],
[ 0.36428383, -0.92449076, -0.11231259],
[-0.27733285, 0.94410968, -0.17816678]])
"""
def __init__(self, seed=None):
super().__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__)
def __call__(self, dim=None, seed=None):
"""Create a frozen n-dimensional uniform direction distribution.
See `uniform_direction` for more information.
"""
return uniform_direction_frozen(dim, seed=seed)
def _process_parameters(self, dim):
"""Dimension N must be specified; it cannot be inferred."""
if dim is None or not np.isscalar(dim) or dim < 1 or dim != int(dim):
raise ValueError("Dimension of vector must be specified, "
"and must be an integer greater than 0.")
return int(dim)
def rvs(self, dim, size=None, random_state=None):
"""Draw random samples from S(N-1).
Parameters
----------
dim : integer
Dimension of space (N).
size : int or tuple of ints, optional
Given a shape of, for example, (m,n,k), m*n*k samples are
generated, and packed in an m-by-n-by-k arrangement.
Because each sample is N-dimensional, the output shape
is (m,n,k,N). If no shape is specified, a single (N-D)
sample is returned.
random_state : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
Pseudorandom number generator state used to generate resamples.
If `random_state` is ``None`` (or `np.random`), the
`numpy.random.RandomState` singleton is used.
If `random_state` is an int, a new ``RandomState`` instance is
used, seeded with `random_state`.
If `random_state` is already a ``Generator`` or ``RandomState``
instance then that instance is used.
Returns
-------
rvs : ndarray
Random direction vectors
"""
random_state = self._get_random_state(random_state)
if size is None:
size = np.array([], dtype=int)
size = np.atleast_1d(size)
dim = self._process_parameters(dim)
samples = _sample_uniform_direction(dim, size, random_state)
return samples
uniform_direction = uniform_direction_gen()
| uniform_direction_gen |
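The sampler used above (`_sample_uniform_direction`) follows Marsaglia's method: draw i.i.d. standard normals and normalize. A sketch of the idea, not the exact SciPy implementation:

import numpy as np

def sample_uniform_direction(dim, size, rng):
    # Rotational invariance of the Gaussian makes the normalized draws
    # uniform on the unit hypersphere S^(dim-1).
    samples = rng.standard_normal((*size, dim))
    return samples / np.linalg.norm(samples, axis=-1, keepdims=True)

rng = np.random.default_rng(0)
u = sample_uniform_direction(3, (5,), rng)
print(np.linalg.norm(u, axis=-1))  # all ones: points on the unit sphere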
python | django-import-export__django-import-export | import_export/results.py | {
"start": 5806,
"end": 6081
} | class ____:
"""A row that resulted in one or more errors being raised during import."""
def __init__(self, number, errors):
#: The row number
self.number = number
#: A list of errors associated with the row
self.errors = errors
| ErrorRow |
python | pypa__installer | src/installer/records.py | {
"start": 462,
"end": 745
} | class ____(Exception):
"""Raised when a RecordEntry is not valid, due to improper element values or count."""
elements: Iterable[str]
issues: Iterable[str]
def __post_init__(self) -> None:
super().__init__(", ".join(self.issues))
@dataclass
| InvalidRecordEntry |
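A raising sketch for the entry above (values are illustrative; this assumes the `@dataclass` decorator that precedes the class in the original module, so fields are set via keywords and `__post_init__` folds the issues into the message):

try:
    raise InvalidRecordEntry(
        elements=["pkg/module.py", "sha256=...", "badsize"],
        issues=["`size` could not be interpreted as an integer"],
    )
except InvalidRecordEntry as exc:
    print(exc)           # `size` could not be interpreted as an integer
    print(exc.elements)  # the raw RECORD row that failed validation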
python | django__django | django/contrib/staticfiles/handlers.py | {
"start": 459,
"end": 2114
} | class ____:
"""
Common methods used by WSGI and ASGI handlers.
"""
# May be used to differentiate between handler types (e.g. in a
# request_finished signal)
handles_files = True
def load_middleware(self):
# Middleware are already loaded for self.application; no need to reload
# them for self.
pass
def get_base_url(self):
utils.check_settings()
return settings.STATIC_URL
def _should_handle(self, path):
"""
Check if the path should be handled. Ignore the path if:
* the host is provided as part of the base_url
* the request's path isn't under the media path (or equal)
"""
return path.startswith(self.base_url.path) and not self.base_url.netloc
def file_path(self, url):
"""
Return the relative path to the media file on disk for the given URL.
"""
relative_url = url.removeprefix(self.base_url.path)
return url2pathname(relative_url)
def serve(self, request):
"""Serve the request path."""
return serve(request, self.file_path(request.path), insecure=True)
def get_response(self, request):
try:
return self.serve(request)
except Http404 as e:
return response_for_exception(request, e)
async def get_response_async(self, request):
try:
return await sync_to_async(self.serve, thread_sensitive=False)(request)
except Http404 as e:
return await sync_to_async(response_for_exception, thread_sensitive=False)(
request, e
)
| StaticFilesHandlerMixin |
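What `file_path` does, in isolation: strip the static prefix, then map the remaining URL onto a relative filesystem path. A standalone illustration (the URL is a made-up example):

from urllib.request import url2pathname

base_path = "/static/"
url = "/static/css/app%20v2.css"
relative_url = url.removeprefix(base_path)  # 'css/app%20v2.css'
print(url2pathname(relative_url))  # 'css/app v2.css' (separator is OS-specific)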
python | run-llama__llama_index | llama-index-integrations/vector_stores/llama-index-vector-stores-deeplake/llama_index/vector_stores/deeplake/base.py | {
"start": 12756,
"end": 24114
} | class ____(BasePydanticVectorStore):
"""
The DeepLake Vector Store.
In this vector store we store the text, its embedding and
a few pieces of its metadata in a deeplake dataset. This implementation
allows the use of an already existing deeplake dataset if it was created by
this vector store. It also supports creating a new one if the dataset doesn't
exist or if `overwrite` is set to True.
Examples:
`pip install llama-index-vector-stores-deeplake`
```python
from llama_index.vector_stores.deeplake import DeepLakeVectorStore
# Create an instance of DeepLakeVectorStore
vector_store = DeepLakeVectorStore(dataset_path=dataset_path, overwrite=True)
```
"""
stores_text: bool = True
flat_metadata: bool = True
ingestion_batch_size: int
num_workers: int
token: Optional[str]
read_only: Optional[bool]
dataset_path: str
vectorstore: Any = "VectorStore"
_embedding_dimension: int = PrivateAttr()
_ttl_seconds: Optional[int] = PrivateAttr()
_deeplake_db: Any = PrivateAttr()
_deeplake_db_collection: Any = PrivateAttr()
_id_tensor_name: str = PrivateAttr()
def __init__(
self,
dataset_path: str = "llama_index",
token: Optional[str] = None,
read_only: Optional[bool] = False,
ingestion_batch_size: int = 1024,
ingestion_num_workers: int = 4,
overwrite: bool = False,
exec_option: Optional[str] = None,
verbose: bool = True,
**kwargs: Any,
) -> None:
"""
Args:
dataset_path (str): The full path for storing to the Deep Lake Vector Store. It can be:
- a Deep Lake cloud path of the form ``hub://org_id/dataset_name``. Requires registration with Deep Lake.
- an s3 path of the form ``s3://bucketname/path/to/dataset``. Credentials must be provided either in the environment or via the creds argument.
- a local file system path of the form ``./path/to/dataset`` or ``~/path/to/dataset`` or ``path/to/dataset``.
- a memory path of the form ``mem://path/to/dataset`` which doesn't save the dataset but keeps it in memory instead. Should be used only for testing as it does not persist.
Defaults to "llama_index".
overwrite (bool, optional): If set to True this overwrites the Vector Store if it already exists. Defaults to False.
token (str, optional): Activeloop token, used for fetching user credentials. This is optional; tokens are normally autogenerated. Defaults to None.
read_only (bool, optional): Opens dataset in read-only mode if True. Defaults to False.
ingestion_batch_size (int): During data ingestion, data is divided
into batches. Batch size is the size of each batch. Defaults to 1024.
ingestion_num_workers (int): number of workers to use during data ingestion.
Defaults to 4.
exec_option (str): Default method for search execution. It could be either ``"auto"``, ``"python"``, ``"compute_engine"`` or ``"tensor_db"``. Defaults to ``"auto"``. If None, it's set to "auto".
- ``auto``- Selects the best execution method based on the storage location of the Vector Store. It is the default option.
- ``python`` - Pure-python implementation that runs on the client and can be used for data stored anywhere. WARNING: using this option with big datasets is discouraged because it can lead to memory issues.
- ``compute_engine`` - Performant C++ implementation of the Deep Lake Compute Engine that runs on the client and can be used for any data stored in or connected to Deep Lake. It cannot be used with in-memory or local datasets.
- ``tensor_db`` - Performant and fully-hosted Managed Tensor Database that is responsible for storage and query execution. Only available for data stored in the Deep Lake Managed Database. Store datasets in this database by specifying runtime = {"tensor_db": True} during dataset creation.
Raises:
ImportError: Unable to import `deeplake`.
"""
super().__init__(
dataset_path=dataset_path,
token=token,
read_only=read_only,
ingestion_batch_size=ingestion_batch_size,
num_workers=ingestion_num_workers,
)
self.vectorstore = VectorStore(
path=dataset_path,
ingestion_batch_size=ingestion_batch_size,
num_workers=ingestion_num_workers,
token=token,
read_only=read_only,
exec_option=exec_option,
overwrite=overwrite,
verbose=verbose,
**kwargs,
)
try:
self._id_tensor_name = (
"ids" if "ids" in self.vectorstore.tensors() else "id"
)
except AttributeError:
self._id_tensor_name = "id"
@property
def client(self) -> Any:
"""
Get client.
Returns:
Any: DeepLake vectorstore dataset.
"""
return self.vectorstore.dataset
def summary(self):
self.vectorstore.summary()
def get_nodes(
self,
node_ids: Optional[List[str]] = None,
filters: Optional[MetadataFilters] = None,
) -> List[BaseNode]:
"""Get nodes from vector store."""
if node_ids:
data = self.vectorstore.search(filter={"id": node_ids})
else:
data = self.vectorstore.search(filter={})
nodes = []
for metadata in data["metadata"]:
nodes.append(metadata_dict_to_node(metadata))
def filter_func(doc):
if not filters:
return True
found_one = False
for f in filters.filters:
value = doc.metadata[f.key]
if f.operator == FilterOperator.EQ:
result = value == f.value
elif f.operator == FilterOperator.GT:
result = value > f.value
elif f.operator == FilterOperator.GTE:
result = value >= f.value
elif f.operator == FilterOperator.LT:
result = value < f.value
elif f.operator == FilterOperator.LTE:
result = value <= f.value
elif f.operator == FilterOperator.NE:
result = value != f.value
elif f.operator == FilterOperator.IN:
result = value in f.value
elif f.operator == FilterOperator.NOT_IN:
result = value not in f.value
elif f.operator == FilterOperator.TEXT_MATCH:
result = f.value in value
else:
raise ValueError(f"Unsupported filter operator: {f.operator}")
if result:
found_one = True
if filters.condition == FilterCondition.OR:
return True
else:
if filters.condition == FilterCondition.AND:
return False
return found_one
if filters:
return [x for x in nodes if filter_func(x)]
else:
return nodes
def delete_nodes(
self,
node_ids: Optional[List[str]] = None,
filters: Optional[MetadataFilters] = None,
**delete_kwargs: Any,
) -> None:
if filters:
self.vectorstore.delete(
ids=[
x.node_id
for x in self.get_nodes(node_ids=node_ids, filters=filters)
]
)
else:
self.vectorstore.delete(ids=node_ids)
def clear(self) -> None:
"""Clear the vector store."""
if DEEPLAKE_V4:
for i in range(len(self.vectorstore.ds) - 1, -1, -1):
self.vectorstore.ds.delete(i)
else:
self.vectorstore.delete(filter=lambda x: True)
def add(self, nodes: List[BaseNode], **add_kwargs: Any) -> List[str]:
"""
Add the embeddings and their nodes into DeepLake.
Args:
nodes (List[BaseNode]): List of nodes with embeddings
to insert.
Returns:
List[str]: List of ids inserted.
"""
embedding = []
metadata = []
id_ = []
text = []
for node in nodes:
embedding.append(node.get_embedding())
metadata.append(
node_to_metadata_dict(
node, remove_text=False, flat_metadata=self.flat_metadata
)
)
id_.append(node.node_id)
text.append(node.get_content(metadata_mode=MetadataMode.NONE))
if DEEPLAKE_V4:
kwargs = {self._id_tensor_name: id_}
return self.vectorstore.add(
embedding_data=embedding,
embedding_tensor="embedding",
metadata=metadata,
text=text,
return_ids=True,
**kwargs,
)
else:
kwargs = {
"embedding": embedding,
"metadata": metadata,
self._id_tensor_name: id_,
"text": text,
}
return self.vectorstore.add(
return_ids=True,
**kwargs,
)
def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""
Delete nodes matching the given ref_doc_id.
Args:
ref_doc_id (str): The doc_id of the document to delete.
"""
self.vectorstore.delete(filter={"metadata": {"doc_id": ref_doc_id}})
def query(self, query: VectorStoreQuery, **kwargs: Any) -> VectorStoreQueryResult:
"""
Query index for top k most similar nodes.
Args:
query (VectorStoreQuery): VectorStoreQuery class input, it has
the following attributes:
1. query_embedding (List[float]): query embedding
2. similarity_top_k (int): top k most similar nodes
deep_memory (bool): Whether to use deep memory for query execution.
Returns:
VectorStoreQueryResult
"""
query_embedding = cast(List[float], query.query_embedding)
exec_option = kwargs.get("exec_option")
deep_memory = kwargs.get("deep_memory")
data = self.vectorstore.search(
embedding=query_embedding,
exec_option=exec_option,
k=query.similarity_top_k,
distance_metric="cos",
filter=query.filters,
return_tensors=None,
deep_memory=deep_memory,
)
similarities = data["score"]
ids = data[self._id_tensor_name]
metadatas = data["metadata"]
nodes = []
for metadata in metadatas:
if "_node_type" not in metadata:
metadata["_node_type"] = TextNode.class_name()
nodes.append(metadata_dict_to_node(metadata))
return VectorStoreQueryResult(nodes=nodes, similarities=similarities, ids=ids)
| DeepLakeVectorStore |
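A hedged query sketch against the store configured in the docstring above (the import path assumes llama-index-core, and the 1536-dim zero vector is a stand-in for a real query embedding):

from llama_index.core.vector_stores.types import VectorStoreQuery

query = VectorStoreQuery(query_embedding=[0.0] * 1536, similarity_top_k=2)
result = vector_store.query(query)
for node, score in zip(result.nodes, result.similarities):
    print(score, node.get_content()[:80])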
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 459841,
"end": 460303
} | class ____(sgqlc.types.Type):
"""Autogenerated return type of AbortQueuedMigrations"""
__schema__ = github_schema
__field_names__ = ("client_mutation_id", "success")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
success = sgqlc.types.Field(Boolean, graphql_name="success")
"""Did the operation succeed?"""
| AbortQueuedMigrationsPayload |
python | apache__airflow | airflow-core/tests/unit/models/test_taskinstance.py | {
"start": 117716,
"end": 128676
} | class ____:
    @pytest.mark.parametrize(
        ("literal", "expected_outputs"),
        [
            pytest.param([1, 2, 3], [1, 2, 3], id="list"),
            pytest.param({"a": 1, "b": 2}, [("a", 1), ("b", 2)], id="dict"),
        ],
    )
    def test_map_literal(self, literal, expected_outputs, dag_maker, session):
        outputs = []

        with dag_maker(dag_id="literal", session=session) as dag:

            @dag.task
            def show(value):
                outputs.append(value)

            show_task = show.expand(value=literal).operator

        dag_run = dag_maker.create_dagrun()
        mapped_tis = (
            session.query(TI)
            .filter_by(task_id="show", dag_id=dag_run.dag_id, run_id=dag_run.run_id)
            .order_by(TI.map_index)
            .all()
        )
        assert len(mapped_tis) == len(literal)

        for ti in sorted(mapped_tis, key=operator.attrgetter("map_index")):
            ti.refresh_from_task(show_task)
            ti.run()

        assert outputs == expected_outputs

    @pytest.mark.parametrize(
        ("upstream_return", "expected_outputs"),
        [
            pytest.param([1, 2, 3], [1, 2, 3], id="list"),
            pytest.param({"a": 1, "b": 2}, [("a", 1), ("b", 2)], id="dict"),
        ],
    )
    def test_map_xcom(self, upstream_return, expected_outputs, dag_maker, session):
        outputs = []

        with dag_maker(dag_id="xcom", session=session, serialized=True) as dag:

            @dag.task
            def emit():
                return upstream_return

            @dag.task
            def show(value):
                outputs.append(value)

            show.expand(value=emit())

        dag_run = dag_maker.create_dagrun()
        emit_ti = dag_run.get_task_instance("emit", session=session)
        emit_ti.refresh_from_task(dag_maker.dag.get_task("emit"))
        dag_maker.run_ti(emit_ti.task_id, dag_run=dag_run, session=session)

        show_task = dag.get_task("show")
        mapped_tis, max_map_index = TaskMap.expand_mapped_task(
            dag.task_dict[show_task.task_id], dag_run.run_id, session=session
        )
        assert max_map_index + 1 == len(mapped_tis) == len(upstream_return)

        for ti in sorted(mapped_tis, key=operator.attrgetter("map_index")):
            ti.refresh_from_task(show_task)
            dag_maker.run_ti(ti.task_id, dag_run=dag_run, map_index=ti.map_index, session=session)

        assert outputs == expected_outputs

    def test_map_literal_cross_product(self, dag_maker, session):
        """Test a mapped task with literal cross product args expand properly."""
        outputs = []

        with dag_maker(dag_id="product_same_types", session=session, serialized=True) as dag:

            @dag.task
            def show(a, b):
                outputs.append((a, b))

            show.expand(a=[2, 4, 8], b=[5, 10])

        dag_run = dag_maker.create_dagrun()
        show_task = dag.get_task("show")
        assert show_task.get_parse_time_mapped_ti_count() == 6
        mapped_tis, max_map_index = TaskMap.expand_mapped_task(show_task, dag_run.run_id, session=session)
        assert len(mapped_tis) == 0  # Expanded at parse!
        assert max_map_index == 5

        tis = (
            session.query(TaskInstance)
            .filter(
                TaskInstance.dag_id == dag.dag_id,
                TaskInstance.task_id == "show",
                TaskInstance.run_id == dag_run.run_id,
            )
            .order_by(TaskInstance.map_index)
            .all()
        )
        for ti in tis:
            ti.refresh_from_task(show_task)
            dag_maker.run_ti(ti.task_id, map_index=ti.map_index, dag_run=dag_run, session=session)

        assert outputs == [(2, 5), (2, 10), (4, 5), (4, 10), (8, 5), (8, 10)]

    def test_map_in_group(self, tmp_path: pathlib.Path, dag_maker, session):
        out = tmp_path.joinpath("out")
        out.touch()

        with dag_maker(dag_id="in_group", session=session, serialized=True) as dag:

            @dag.task
            def envs():
                return [{"VAR1": "FOO"}, {"VAR1": "BAR"}]

            @dag.task
            def cmds():
                return [f'echo "hello $VAR1" >> {out}', f'echo "goodbye $VAR1" >> {out}']

            with TaskGroup(group_id="dynamic"):
                BashOperator.partial(task_id="bash", do_xcom_push=False).expand(
                    env=envs(),
                    bash_command=cmds(),
                )

        dag_run: DagRun = dag_maker.create_dagrun()
        original_tis = {ti.task_id: ti for ti in dag_run.get_task_instances(session=session)}

        for task_id in ["dynamic.envs", "dynamic.cmds"]:
            ti = original_tis[task_id]
            ti.refresh_from_task(dag.get_task(task_id))
            dag_maker.run_ti(ti.task_id, map_index=ti.map_index, dag_run=dag_run, session=session)

        bash_task = dag.get_task("dynamic.bash")
        mapped_bash_tis, max_map_index = TaskMap.expand_mapped_task(
            bash_task, dag_run.run_id, session=session
        )
        assert max_map_index == 3  # 2 * 2 mapped tasks.
        for ti in sorted(mapped_bash_tis, key=operator.attrgetter("map_index")):
            ti.refresh_from_task(bash_task)
            dag_maker.run_ti(ti.task_id, map_index=ti.map_index, dag_run=dag_run, session=session)

        with out.open() as f:
            out_lines = [line.strip() for line in f]
        assert out_lines == ["hello FOO", "goodbye FOO", "hello BAR", "goodbye BAR"]


def _get_lazy_xcom_access_expected_sql_lines() -> list[str]:
    backend = os.environ.get("BACKEND")
    if backend == "mysql":
        return [
            "SELECT xcom.value",
            "FROM xcom",
            "WHERE xcom.dag_id = 'test_dag' AND xcom.run_id = 'test' "
            "AND xcom.task_id = 't' AND xcom.map_index = -1 AND xcom.`key` = 'xxx'",
        ]
    if backend == "postgres":
        return [
            "SELECT xcom.value",
            "FROM xcom",
            "WHERE xcom.dag_id = 'test_dag' AND xcom.run_id = 'test' "
            "AND xcom.task_id = 't' AND xcom.map_index = -1 AND xcom.key = 'xxx'",
        ]
    if backend == "sqlite":
        return [
            "SELECT xcom.value",
            "FROM xcom",
            "WHERE xcom.dag_id = 'test_dag' AND xcom.run_id = 'test' "
            "AND xcom.task_id = 't' AND xcom.map_index = -1 AND xcom.\"key\" = 'xxx'",
        ]
    raise RuntimeError(f"unknown backend {backend!r}")


def test_expand_non_templated_field(dag_maker, session):
    """Test expand on non-templated fields sets upstream deps properly."""

    class SimpleBashOperator(BashOperator):
        template_fields = ()

    with dag_maker(dag_id="product_same_types", session=session) as dag:

        @dag.task
        def get_extra_env():
            return [{"foo": "bar"}, {"foo": "biz"}]

        SimpleBashOperator.partial(task_id="echo", bash_command="echo $FOO").expand(env=get_extra_env())

    dag_maker.create_dagrun()

    echo_task = dag.get_task("echo")
    assert "get_extra_env" in echo_task.upstream_task_ids


def test_taskinstance_with_note(create_task_instance, session):
    ti: TaskInstance = create_task_instance(session=session)
    ti.note = "ti with note"
    session.add(ti)
    session.commit()

    ti_note: TaskInstanceNote = session.query(TaskInstanceNote).filter_by(ti_id=ti.id).one()
    assert ti_note.content == "ti with note"

    session.delete(ti)
    session.commit()

    assert session.query(TaskInstance).filter_by(id=ti.id).one_or_none() is None
    assert session.query(TaskInstanceNote).filter_by(ti_id=ti.id).one_or_none() is None


def test__refresh_from_db_should_not_increment_try_number(dag_maker, session):
    with dag_maker():
        BashOperator(task_id="hello", bash_command="hi")

    dag_maker.create_dagrun(state="success")

    ti = session.scalar(select(TaskInstance))
    session.get(TaskInstance, ti.id).try_number += 1
    session.commit()
    assert ti.task_id == "hello"  # just to confirm...
    assert ti.try_number == 1  # starts out as 1
    ti.refresh_from_db()
    assert ti.try_number == 1  # stays 1


def test_delete_dagversion_restricted_when_taskinstance_exists(dag_maker, session):
    """
    Ensure that deleting a DagVersion with existing TaskInstance references is restricted (ON DELETE RESTRICT).
    """
    with dag_maker(dag_id="test_dag_restrict", session=session) as dag:
        EmptyOperator(task_id="task1")

    dag_maker.create_dagrun(session=session)

    version = session.scalar(select(DagVersion).where(DagVersion.dag_id == dag.dag_id))
    assert version is not None

    ti = session.scalars(select(TaskInstance).where(TaskInstance.dag_version_id == version.id)).first()
    assert ti is not None

    session.delete(version)
    with pytest.raises(IntegrityError):
        session.commit()


@pytest.mark.parametrize(
    ("normal_tasks", "mapped_tasks", "expected"),
    [
        # 4 is just a regular task so it depends on all its upstreams.
        pytest.param(["4"], [], {"1", "2", "3"}, id="nonmapped"),
        # 3 is mapped; it depends on all tis of the mapped upstream 2.
        pytest.param(["3"], [], {"1", "2"}, id="mapped-whole"),
        # Every ti of a mapped task depends on all tis of the mapped upstream.
        pytest.param([], [("3", 1)], {"1", "2"}, id="mapped-one"),
        # Same as the (non-group) unmapped case, d depends on all upstreams.
        pytest.param(["d"], [], {"a", "b", "c"}, id="group-nonmapped"),
        # This specifies c tis in ALL mapped task groups, so all b tis are needed.
        pytest.param(["c"], [], {"a", "b"}, id="group-mapped-whole"),
        # This only specifies one c ti, so only one b ti from the same mapped instance is returned.
        pytest.param([], [("c", 1)], {"a", ("b", 1)}, id="group-mapped-one"),
    ],
)
def test_find_relevant_relatives(dag_maker, session, normal_tasks, mapped_tasks, expected):
    # 1 -> 2[] -> 3[] -> 4
    #
    # a -> " b --> c " -> d
    #      "== g[] =="
    with dag_maker(session=session) as dag:
        t1 = EmptyOperator(task_id="1")
        t2 = MockOperator.partial(task_id="2").expand(arg1=["x", "y"])
        t3 = MockOperator.partial(task_id="3").expand(arg1=["x", "y"])
        t4 = EmptyOperator(task_id="4")
        t1 >> t2 >> t3 >> t4

        ta = EmptyOperator(task_id="a")

        @task_group(prefix_group_id=False)
        def g(v):
            tb = MockOperator(task_id="b", arg1=v)
            tc = MockOperator(task_id="c", arg1=v)
            tb >> tc

        td = EmptyOperator(task_id="d")
        ta >> g.expand(v=["x", "y", "z"]) >> td

    dr = dag_maker.create_dagrun(state="success")
    result = find_relevant_relatives(
        normal_tasks=normal_tasks,
        mapped_tasks=mapped_tasks,
        direction="upstream",
        dag=dag,
        run_id=dr.run_id,
        session=session,
    )
    assert result == expected
| TestMappedTaskInstanceReceiveValue |
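
These tests exercise Airflow's dynamic task mapping. A minimal sketch of the feature itself (not of the test harness above), assuming Airflow 2.4+ with the TaskFlow API and the `pendulum` package:

import pendulum
from airflow.decorators import dag, task

@dag(schedule=None, start_date=pendulum.datetime(2024, 1, 1), catchup=False)
def mapping_demo():
    @task
    def show(value):
        print(value)

    # One task instance is created per element, mirroring test_map_literal.
    show.expand(value=[1, 2, 3])

mapping_demo()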
python | getsentry__sentry | tests/sentry/middleware/test_access_log_middleware.py | {
"start": 5711,
"end": 6461
} | class ____(APITestCase):
    @pytest.fixture(autouse=True)
    def inject_fixtures(self, caplog: pytest.LogCaptureFixture):
        self._caplog = caplog

    def assert_access_log_recorded(self):
        sentinel = object()
        for record in self.captured_logs:
            for field in required_access_log_fields:
                assert getattr(record, field, sentinel) != sentinel, field

    @property
    def captured_logs(self):
        return [r for r in self._caplog.records if r.name == "sentry.access.api"]

    def get_tested_log(self, **kwargs):
        tested_log_path = unquote(reverse(self.endpoint, **kwargs))
        return next(log for log in self.captured_logs if log.path == tested_log_path)


@all_silo_test
| LogCaptureAPITestCase |
python | django__django | tests/forms_tests/tests/test_forms.py | {
"start": 1631,
"end": 1825
} | class ____(Form):
    name = CharField()
    composers = MultipleChoiceField(
        choices=[("J", "John Lennon"), ("P", "Paul McCartney")],
        widget=CheckboxSelectMultiple,
    )
| SongForm |
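
A self-contained sketch of how a form like the one above validates multiple-choice input; it requires Django to be installed, and the minimal in-process settings shown here stand in for a real settings module.

import django
from django.conf import settings

if not settings.configured:
    settings.configure()  # minimal settings; enough for form validation
    django.setup()

from django import forms

class SongForm(forms.Form):
    name = forms.CharField()
    composers = forms.MultipleChoiceField(
        choices=[("J", "John Lennon"), ("P", "Paul McCartney")],
        widget=forms.CheckboxSelectMultiple,
    )

form = SongForm({"name": "Yesterday", "composers": ["J", "P"]})
assert form.is_valid()
assert form.cleaned_data["composers"] == ["J", "P"]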
python | pytorch__pytorch | torch/_inductor/pattern_matcher.py | {
"start": 16996,
"end": 17627
} | class ____(PatternExpr):
    """
    Capture a kwarg which will become an input to the handler.
    """

    def __init__(self, name: str) -> None:
        super().__init__()
        self.name = name

    def __repr__(self) -> str:
        return f"KeywordArg({self.name!r})"

    def _match(self, node: NodeOrConstant, ctx: MatchContext) -> MatchResult:
        return Match(ctx, self, kwargs={self.name: node})  # matches anything

    def pattern_eq(self, other: Any) -> bool:
        other = typing.cast(Self, other)  # super makes sure this is true
        return super().pattern_eq(other) and self.name == other.name
| KeywordArg |
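
This class belongs to Inductor's private pattern matcher. A hedged sketch of how such a pattern is typically assembled; these names live under `torch._inductor` and are not a stable public API, so treat this as illustrative only.

import torch
from torch._inductor.pattern_matcher import CallFunction, KeywordArg

# Match calls like torch.add(<anything>, <anything>), capturing both inputs
# so a registered handler would receive them as `x` and `y`.
pattern = CallFunction(torch.add, KeywordArg("x"), KeywordArg("y"))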
python | sphinx-doc__sphinx | sphinx/domains/cpp/_ast.py | {
"start": 36945,
"end": 37870
} | class ____(ASTExpression):
    def __init__(self, typ: ASTType) -> None:
        self.typ = typ

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, ASTAlignofExpr):
            return NotImplemented
        return self.typ == other.typ

    def __hash__(self) -> int:
        return hash(self.typ)

    def _stringify(self, transform: StringifyTransform) -> str:
        return 'alignof(' + transform(self.typ) + ')'

    def get_id(self, version: int) -> str:
        return 'at' + self.typ.get_id(version)

    def describe_signature(
        self, signode: TextElement, mode: str, env: BuildEnvironment, symbol: Symbol
    ) -> None:
        signode += addnodes.desc_sig_keyword('alignof', 'alignof')
        signode += addnodes.desc_sig_punctuation('(', '(')
        self.typ.describe_signature(signode, mode, env, symbol)
        signode += addnodes.desc_sig_punctuation(')', ')')
| ASTAlignofExpr |
python | ray-project__ray | doc/source/serve/doc_code/autoscale_model_comp_example.py | {
"start": 495,
"end": 1036
} | class ____:
    def __init__(self, a_handle, b_handle):
        self.a_handle: DeploymentHandle = a_handle
        self.b_handle: DeploymentHandle = b_handle

    async def __call__(self) -> str:
        a_future = self.a_handle.remote()
        b_future = self.b_handle.remote()
        return (await a_future), (await b_future)


app = Driver.bind(HeavyLoad.bind(), LightLoad.bind())
# __serve_example_end__

import requests  # noqa

serve.run(app)

resp = requests.post("http://localhost:8000")
assert resp.json() == ["heavy", "light"]
| Driver |
python | mlflow__mlflow | tests/resources/db/initial_models.py | {
"start": 978,
"end": 2271
} | class ____(Base):
    """
    DB model for :py:class:`mlflow.entities.Experiment`. These are recorded in ``experiment`` table.
    """

    __tablename__ = "experiments"

    experiment_id = Column(Integer, autoincrement=True)
    """
    Experiment ID: `Integer`. *Primary Key* for ``experiment`` table.
    """
    name = Column(String(256), unique=True, nullable=False)
    """
    Experiment name: `String` (limit 256 characters). Defined as *Unique* and *Non null* in
    table schema.
    """
    artifact_location = Column(String(256), nullable=True)
    """
    Default artifact location for this experiment: `String` (limit 256 characters). Defined as
    *Nullable* in table schema.
    """
    lifecycle_stage = Column(String(32), default="active")
    """
    Lifecycle Stage of experiment: `String` (limit 32 characters).
    Can be either ``active`` (default) or ``deleted``.
    """

    __table_args__ = (
        CheckConstraint(lifecycle_stage.in_(["active", "deleted"]), name="lifecycle_stage"),
        PrimaryKeyConstraint("experiment_id", name="experiment_pk"),
    )

    def __repr__(self):
        return f"<SqlExperiment ({self.experiment_id}, {self.name})>"
| SqlExperiment |
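
A self-contained sketch (assuming SQLAlchemy 1.4+) of the model above created against an in-memory SQLite database; `Base` here is a local declarative base, a stand-in for MLflow's actual one.

from sqlalchemy import (
    CheckConstraint,
    Column,
    Integer,
    PrimaryKeyConstraint,
    String,
    create_engine,
)
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()

class SqlExperiment(Base):
    __tablename__ = "experiments"
    experiment_id = Column(Integer, autoincrement=True)
    name = Column(String(256), unique=True, nullable=False)
    artifact_location = Column(String(256), nullable=True)
    lifecycle_stage = Column(String(32), default="active")
    __table_args__ = (
        CheckConstraint(lifecycle_stage.in_(["active", "deleted"]), name="lifecycle_stage"),
        PrimaryKeyConstraint("experiment_id", name="experiment_pk"),
    )

engine = create_engine("sqlite://")
Base.metadata.create_all(engine)
with Session(engine) as session:
    session.add(SqlExperiment(name="demo"))
    session.commit()
    # The Python-side default fills in lifecycle_stage at insert time.
    assert session.query(SqlExperiment).filter_by(name="demo").one().lifecycle_stage == "active"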
python | joke2k__faker | tests/providers/test_person.py | {
"start": 17585,
"end": 18264
} | class ____(unittest.TestCase):
    """Tests person in the en_IN locale"""

    def setUp(self):
        self.fake = Faker("en_IN")
        Faker.seed(0)

    def test_first_name(self):
        """Verify that gender specific names are set correctly"""
        name = self.fake.first_name_female()
        assert name in EnINProvider.first_names_female

        name = self.fake.first_name_male()
        assert name in EnINProvider.first_names_male

        first_name = self.fake.first_name()
        assert first_name in EnINProvider.first_names

    def test_last_name(self):
        last_name = self.fake.last_name()
        assert last_name in EnINProvider.last_names
| TestEnIN |
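
A minimal sketch (assuming the `faker` package is installed) of the seeded, locale-specific generation the test above relies on; seeding makes the output deterministic across runs.

from faker import Faker

Faker.seed(0)
fake = Faker("en_IN")
print(fake.first_name(), fake.last_name())  # deterministic given the seed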